// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two-dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one-dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
 * (SG) interface with independent configuration of multiple channels.
 *
 */
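
/*
 * Usage sketch (illustrative only, not part of this driver): clients drive
 * these engines through the generic dmaengine API.  The channel name,
 * buffer, length and callback below are hypothetical placeholders.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = done_cb;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * For the VDMA flavour, frame-level parameters (park mode, frame delay,
 * interrupt coalescing) can additionally be set with
 * xilinx_vdma_channel_set_config(), declared in <linux/dma/xilinx_dma.h>.
 */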

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK	GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK		GENMASK(11, 8)
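
/*
 * Illustrative sketch (not a definitive sequence): the frame-count and delay
 * fields above are read-modify-written into DMACR, e.g. to raise an interrupt
 * only after every "frames" frames (hypothetical value), using the register
 * accessors defined later in this file:
 *
 *	u32 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 *
 *	reg &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
 *	reg |= frames << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
 *	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 */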

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)

/* HW specific definitions */
#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE	0x20
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x2
#define XILINX_CDMA_MAX_CHANS_PER_DEVICE	0x1

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

#define xilinx_prep_dma_addr_t(addr)	\
	((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
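
/*
 * Note: xilinx_prep_dma_addr_t() relies on token pasting, so it must be
 * passed the *name* of a 32-bit variable that has a companion "_msb"
 * variable in scope.  For example, with hypothetical locals:
 *
 *	u32 buf_addr, buf_addr_msb;
 *	dma_addr_t addr = xilinx_prep_dma_addr_t(buf_addr);
 *
 * the macro expands to ((dma_addr_t)((u64)buf_addr_msb << 32 | (buf_addr))).
 */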

/* AXI MCDMA Specific Registers/Offsets */
#define XILINX_MCDMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_MCDMA_S2MM_CTRL_OFFSET		0x0500
#define XILINX_MCDMA_CHEN_OFFSET		0x0008
#define XILINX_MCDMA_CH_ERR_OFFSET		0x0010
#define XILINX_MCDMA_RXINT_SER_OFFSET		0x0020
#define XILINX_MCDMA_TXINT_SER_OFFSET		0x0028
#define XILINX_MCDMA_CHAN_CR_OFFSET(x)		(0x40 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_SR_OFFSET(x)		(0x44 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x)	(0x48 + (x) * 0x40)
#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x)	(0x50 + (x) * 0x40)

/* AXI MCDMA Specific Masks/Shifts */
#define XILINX_MCDMA_COALESCE_SHIFT		16
#define XILINX_MCDMA_COALESCE_MAX		24
#define XILINX_MCDMA_IRQ_ALL_MASK		GENMASK(7, 5)
#define XILINX_MCDMA_COALESCE_MASK		GENMASK(23, 16)
#define XILINX_MCDMA_CR_RUNSTOP_MASK		BIT(0)
#define XILINX_MCDMA_IRQ_IOC_MASK		BIT(5)
#define XILINX_MCDMA_IRQ_DELAY_MASK		BIT(6)
#define XILINX_MCDMA_IRQ_ERR_MASK		BIT(7)
#define XILINX_MCDMA_BD_EOP			BIT(30)
#define XILINX_MCDMA_BD_SOP			BIT(31)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 reserved1;
	u32 reserved2;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved field @0x10
 * @control: Control Information field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_aximcdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 rsvd;
	u32 control;
	u32 status;
	u32 sideband_status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_aximcdma_tx_segment {
	struct xilinx_aximcdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @terminating: Check for channel being synchronized by user
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Per-IP callback that starts a transfer on this channel
 * @stop_transfer: Per-IP callback that stops/quiesces this channel
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	bool terminating;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_aximcdma_tx_segment *seg_mv;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};

/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 *
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
	XDMA_TYPE_AXIMCDMA
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	irqreturn_t (*irq_handler)(int irq, void *data);
	const int max_channels;
};

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA AXI4-Lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 s2mm_chan_id;
	u32 mm2s_chan_id;
	u32 max_buffer_len;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
				  val, cond, delay_us, timeout_us)
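
/*
 * Example (sketch only): waiting for the engine to halt after clearing
 * RUNSTOP would poll DMASR until the HALTED bit is set or the loop count
 * expires:
 *
 *	u32 val;
 *	int err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *					  val & XILINX_DMA_DMASR_HALTED, 0,
 *					  XILINX_DMA_LOOP_COUNT);
 */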

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
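
/*
 * Typical read-modify-write usage of the helpers above (illustrative sketch):
 * starting and stopping a channel toggles the RUNSTOP bit in DMACR:
 *
 *	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
 *	...
 *	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
 */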

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to register offsets that are not a multiple
 * of 64 bits (e.g. 0x5c), the value is written as two separate 32-bit
 * writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
				       struct xilinx_aximcdma_desc_hw *hw,
				       dma_addr_t buf_addr, size_t sg_used)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
	} else {
		hw->buf_addr = buf_addr + sg_used;
	}
}
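
/*
 * Sketch of how the buffer helpers above are meant to be called when filling
 * a hardware descriptor (names are illustrative): sg_used is the byte offset
 * already consumed within the current buffer, and period_len offsets into
 * the current period for cyclic transfers (0 otherwise):
 *
 *	xilinx_axidma_buf(chan, &segment->hw, sg_dma_address(sg), sg_used, 0);
 */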

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!segment)
		dev_dbg(chan->dev, "Could not find free tx segment\n");

	return segment;
}

/**
 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_aximcdma_tx_segment *
xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_aximcdma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_aximcdma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}

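/**
 * xilinx_dma_clean_hw_desc - Clean AXI DMA hardware descriptor
 * @hw: HW descriptor to clean
 *
 * Zero the descriptor contents while preserving the next-descriptor
 * pointers, so the segment can be returned to the free list without
 * breaking the hardware descriptor chain. (Comment added for clarity.)
 */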
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

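/**
 * xilinx_mcdma_clean_hw_desc - Clean AXI MCDMA hardware descriptor
 * @hw: HW descriptor to clean
 *
 * Same idea as xilinx_dma_clean_hw_desc(), but for the MCDMA descriptor
 * layout: zero everything except the next-descriptor pointers. (Comment
 * added for clarity.)
 */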
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) u32 next_desc = hw->next_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) u32 next_desc_msb = hw->next_desc_msb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) hw->next_desc = next_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) hw->next_desc_msb = next_desc_msb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * xilinx_dma_free_tx_segment - Free transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) * @segment: DMA transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) struct xilinx_axidma_tx_segment *segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) xilinx_dma_clean_hw_desc(&segment->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) list_add_tail(&segment->node, &chan->free_seg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * xilinx_mcdma_free_tx_segment - Free transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * @segment: DMA transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
					 struct xilinx_aximcdma_tx_segment *segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) xilinx_mcdma_clean_hw_desc(&segment->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) list_add_tail(&segment->node, &chan->free_seg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * xilinx_cdma_free_tx_segment - Free transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * @segment: DMA transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct xilinx_cdma_tx_segment *segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) dma_pool_free(chan->desc_pool, segment, segment->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * xilinx_vdma_free_tx_segment - Free transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * @segment: DMA transaction segment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) struct xilinx_vdma_tx_segment *segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dma_pool_free(chan->desc_pool, segment, segment->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * Return: The allocated descriptor on success and NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) static struct xilinx_dma_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) desc = kzalloc(sizeof(*desc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) INIT_LIST_HEAD(&desc->segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * xilinx_dma_free_tx_descriptor - Free transaction descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * @desc: DMA transaction descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct xilinx_dma_tx_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct xilinx_vdma_tx_segment *segment, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) list_for_each_entry_safe(segment, next, &desc->segments, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) list_del(&segment->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) xilinx_vdma_free_tx_segment(chan, segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) list_for_each_entry_safe(cdma_segment, cdma_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) &desc->segments, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) list_del(&cdma_segment->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) xilinx_cdma_free_tx_segment(chan, cdma_segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) list_for_each_entry_safe(axidma_segment, axidma_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) &desc->segments, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) list_del(&axidma_segment->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) xilinx_dma_free_tx_segment(chan, axidma_segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) &desc->segments, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) list_del(&aximcdma_segment->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* Required functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * xilinx_dma_free_desc_list - Free descriptors list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * @chan: Driver specific DMA channel
 * @list: List of descriptors to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct xilinx_dma_tx_descriptor *desc, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) list_for_each_entry_safe(desc, next, list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * xilinx_dma_free_descriptors - Free channel descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) xilinx_dma_free_desc_list(chan, &chan->pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) xilinx_dma_free_desc_list(chan, &chan->done_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) xilinx_dma_free_desc_list(chan, &chan->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * xilinx_dma_free_chan_resources - Free channel resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_dbg(chan->dev, "Free all channel resources.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) xilinx_dma_free_descriptors(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) INIT_LIST_HEAD(&chan->free_seg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /* Free memory that is allocated for BD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) XILINX_DMA_NUM_DESCS, chan->seg_v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) chan->seg_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
		/* Free memory that is allocated for cyclic DMA mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) chan->cyclic_seg_v, chan->cyclic_seg_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) INIT_LIST_HEAD(&chan->free_seg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* Free memory that is allocated for BD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) XILINX_DMA_NUM_DESCS, chan->seg_mv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) chan->seg_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) dma_pool_destroy(chan->desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) chan->desc_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * xilinx_dma_get_residue - Compute residue for a given descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * Return: The number of residue bytes for the descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct xilinx_dma_tx_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct xilinx_cdma_tx_segment *cdma_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct xilinx_axidma_tx_segment *axidma_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct xilinx_aximcdma_tx_segment *aximcdma_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct xilinx_cdma_desc_hw *cdma_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct xilinx_axidma_desc_hw *axidma_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) struct xilinx_aximcdma_desc_hw *aximcdma_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct list_head *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) u32 residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
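	/*
	 * For each segment the hardware control word carries the programmed
	 * transfer length and the status word reports the bytes actually
	 * transferred; their difference, masked to the buffer length field,
	 * is the number of bytes still outstanding for that segment.
	 */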
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) list_for_each(entry, &desc->segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) cdma_seg = list_entry(entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) struct xilinx_cdma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cdma_hw = &cdma_seg->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) residue += (cdma_hw->control - cdma_hw->status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) chan->xdev->max_buffer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) } else if (chan->xdev->dma_config->dmatype ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) XDMA_TYPE_AXIDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) axidma_seg = list_entry(entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct xilinx_axidma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) axidma_hw = &axidma_seg->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) residue += (axidma_hw->control - axidma_hw->status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) chan->xdev->max_buffer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) aximcdma_seg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) list_entry(entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct xilinx_aximcdma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) aximcdma_hw = &aximcdma_seg->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) residue +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) (aximcdma_hw->control - aximcdma_hw->status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) chan->xdev->max_buffer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) /**
 * xilinx_dma_chan_handle_cyclic - Invoke the cyclic DMA callback
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 * @flags: Flags of the channel lock, dropped around the callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct xilinx_dma_tx_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) unsigned long *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dma_async_tx_callback callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void *callback_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) callback = desc->async_tx.callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) callback_param = desc->async_tx.callback_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) spin_unlock_irqrestore(&chan->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) callback(callback_param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) spin_lock_irqsave(&chan->lock, *flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct xilinx_dma_tx_descriptor *desc, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) list_for_each_entry_safe(desc, next, &chan->done_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct dmaengine_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (desc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /* Remove from the list of running transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (unlikely(desc->err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (chan->direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) result.result = DMA_TRANS_READ_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) result.result = DMA_TRANS_WRITE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) result.result = DMA_TRANS_NOERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) result.residue = desc->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* Run the link descriptor callback function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Run any dependencies, then free the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dma_run_dependencies(&desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
		/*
		 * The user may have called a terminate function while the
		 * callback ran; it takes care of cleaning up any remaining
		 * descriptors, so stop processing here.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) if (chan->terminating)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) /**
 * xilinx_dma_do_tasklet - Completion tasklet handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * @t: Pointer to the Xilinx DMA channel structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) xilinx_dma_chan_desc_cleanup(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * xilinx_dma_alloc_chan_resources - Allocate channel resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) /* Has this channel already been allocated? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (chan->desc_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
	/*
	 * Descriptors must be 64-byte aligned to meet the Xilinx VDMA
	 * specification requirement.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* Allocate the buffer descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) chan->seg_v = dma_alloc_coherent(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) &chan->seg_p, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (!chan->seg_v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dev_err(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) "unable to allocate channel %d descriptors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
		/*
		 * For cyclic DMA mode the tail descriptor register must be
		 * programmed with a value that is not part of the BD chain,
		 * so allocate a dedicated descriptor segment at channel
		 * allocation time for programming the tail descriptor.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) sizeof(*chan->cyclic_seg_v),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) &chan->cyclic_seg_p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (!chan->cyclic_seg_v) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) dev_err(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) "unable to allocate desc segment for cyclic DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) XILINX_DMA_NUM_DESCS, chan->seg_v,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) chan->seg_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
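		/*
		 * Link the pre-allocated segments into a circular BD chain
		 * (each next_desc points at the following segment) and put
		 * them all on the free segment list.
		 */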
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) chan->seg_v[i].hw.next_desc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) ((i + 1) % XILINX_DMA_NUM_DESCS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) chan->seg_v[i].hw.next_desc_msb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ((i + 1) % XILINX_DMA_NUM_DESCS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) chan->seg_v[i].phys = chan->seg_p +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) sizeof(*chan->seg_v) * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) list_add_tail(&chan->seg_v[i].node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) &chan->free_seg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* Allocate the buffer descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) chan->seg_mv = dma_alloc_coherent(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) sizeof(*chan->seg_mv) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) XILINX_DMA_NUM_DESCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) &chan->seg_p, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (!chan->seg_mv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev_err(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) "unable to allocate channel %d descriptors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
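		/*
		 * As for AXI DMA above: chain the MCDMA segments into a ring
		 * and add them to the free segment list.
		 */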
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) chan->seg_mv[i].hw.next_desc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ((i + 1) % XILINX_DMA_NUM_DESCS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) chan->seg_mv[i].hw.next_desc_msb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) ((i + 1) % XILINX_DMA_NUM_DESCS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) chan->seg_mv[i].phys = chan->seg_p +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) sizeof(*chan->seg_mv) * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) list_add_tail(&chan->seg_mv[i].node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) &chan->free_seg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) sizeof(struct xilinx_cdma_tx_segment),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) __alignof__(struct xilinx_cdma_tx_segment),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) sizeof(struct xilinx_vdma_tx_segment),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) __alignof__(struct xilinx_vdma_tx_segment),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!chan->desc_pool &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) dev_err(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) "unable to allocate channel %d descriptor pool\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) dma_cookie_init(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel will reset the other
		 * channel as well, so enable the interrupts here.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) XILINX_DMA_DMAXR_ALL_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
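	/* Enable scatter-gather mode for CDMA when the SG interface is present */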
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) XILINX_CDMA_CR_SGMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * xilinx_dma_calc_copysize - Calculate the amount of data to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * @size: Total data that needs to be copied
 * @done: Amount of data that has already been copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Return: Amount of data that has to be copied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int size, int done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) copy = min_t(size_t, size - done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) chan->xdev->max_buffer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if ((copy + done < size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) chan->xdev->common.copy_align) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * If this is not the last descriptor, make sure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * the next one will be properly aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) copy = rounddown(copy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) (1 << chan->xdev->common.copy_align));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * xilinx_dma_tx_status - Get DMA transaction status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * @cookie: Transaction identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * @txstate: Transaction state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * Return: DMA transaction status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) u32 residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) ret = dma_cookie_status(dchan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (ret == DMA_COMPLETE || !txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (!list_empty(&chan->active_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) desc = list_last_entry(&chan->active_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * VDMA and simple mode do not support residue reporting, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * residue field will always be 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) residue = xilinx_dma_get_residue(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) dma_set_residue(txstate, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) * xilinx_dma_stop_transfer - Halt DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* Wait for the hardware to halt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) val & XILINX_DMA_DMASR_HALTED, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) XILINX_DMA_LOOP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) val & XILINX_DMA_DMASR_IDLE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) XILINX_DMA_LOOP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * xilinx_dma_start - Start DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void xilinx_dma_start(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /* Wait for the hardware to start */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) !(val & XILINX_DMA_DMASR_HALTED), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) XILINX_DMA_LOOP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) dev_err(chan->dev, "Cannot start channel %p: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) chan->err = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * xilinx_vdma_start_transfer - Starts VDMA transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * @chan: Driver specific channel struct pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct xilinx_vdma_config *config = &chan->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) u32 reg, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) struct xilinx_vdma_tx_segment *segment, *last = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
	/* This function is called with the channel lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (chan->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (!chan->idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (list_empty(&chan->pending_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) desc = list_first_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /* Configure the hardware using info in the config structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (chan->has_vflip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) reg |= config->vflip_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (config->frm_cnt_en)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) /* If not parking, enable circular mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (config->park)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) reg &= ~XILINX_DMA_DMACR_CIRC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) reg |= XILINX_DMA_DMACR_CIRC_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
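	/*
	 * Point the park pointer at the frame about to be submitted, using
	 * the read frame reference for MM2S and the write frame reference
	 * for S2MM.
	 */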
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) j = chan->desc_submitcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (chan->direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /* Start the hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) xilinx_dma_start(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (chan->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /* Start the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (chan->desc_submitcount < chan->num_frms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) i = chan->desc_submitcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
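	/* Write each frame buffer address into its start address register */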
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) list_for_each_entry(segment, &desc->segments, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (chan->ext_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) vdma_desc_write_64(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) XILINX_VDMA_REG_START_ADDRESS_64(i++),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) segment->hw.buf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) segment->hw.buf_addr_msb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) vdma_desc_write(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) XILINX_VDMA_REG_START_ADDRESS(i++),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) segment->hw.buf_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) last = segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (!last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
	/* HW expects these parameters to be the same for one transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) last->hw.stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) chan->desc_submitcount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) chan->desc_pendingcount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) list_add_tail(&desc->node, &chan->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (chan->desc_submitcount == chan->num_frms)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) chan->desc_submitcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) chan->idle = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /**
 * xilinx_cdma_start_transfer - Starts CDMA transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * @chan: Driver specific channel struct pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) struct xilinx_cdma_tx_segment *tail_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (chan->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (!chan->idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (list_empty(&chan->pending_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) head_desc = list_first_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) tail_desc = list_last_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) tail_segment = list_last_entry(&tail_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) struct xilinx_cdma_tx_segment, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) ctrl_reg |= chan->desc_pendingcount <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) XILINX_DMA_CR_COALESCE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (chan->has_sg) {
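		/*
		 * Toggle the SGMODE bit to (re)enter scatter-gather mode,
		 * then point CURDESC at the first BD; the TAILDESC write
		 * below starts the transfer.
		 */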
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) XILINX_CDMA_CR_SGMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) XILINX_CDMA_CR_SGMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) xilinx_write(chan, XILINX_DMA_REG_CURDESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) head_desc->async_tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* Update tail ptr register which will start the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) tail_segment->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /* In simple mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct xilinx_cdma_tx_segment *segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct xilinx_cdma_desc_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) segment = list_first_entry(&head_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) struct xilinx_cdma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) hw = &segment->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) xilinx_prep_dma_addr_t(hw->src_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) xilinx_prep_dma_addr_t(hw->dest_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /* Start the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) hw->control & chan->xdev->max_buffer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) list_splice_tail_init(&chan->pending_list, &chan->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) chan->desc_pendingcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) chan->idle = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * xilinx_dma_start_transfer - Starts DMA transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * @chan: Driver specific channel struct pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) struct xilinx_axidma_tx_segment *tail_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (chan->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (list_empty(&chan->pending_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (!chan->idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) head_desc = list_first_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) tail_desc = list_last_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) tail_segment = list_last_entry(&tail_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct xilinx_axidma_tx_segment, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
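	/* Program interrupt coalescing to the pending descriptor count, if it fits */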
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) reg &= ~XILINX_DMA_CR_COALESCE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) reg |= chan->desc_pendingcount <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) XILINX_DMA_CR_COALESCE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (chan->has_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) xilinx_write(chan, XILINX_DMA_REG_CURDESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) head_desc->async_tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) xilinx_dma_start(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (chan->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
	/*
	 * Start the transfer: in SG mode writing the tail descriptor below
	 * kicks off buffer descriptor fetching, while in simple register
	 * mode the transfer starts once the buffer address and the BTT
	 * (bytes to transfer) field have been programmed.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (chan->has_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (chan->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) chan->cyclic_seg_v->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) tail_segment->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct xilinx_axidma_tx_segment *segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) struct xilinx_axidma_desc_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) segment = list_first_entry(&head_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct xilinx_axidma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) hw = &segment->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) xilinx_prep_dma_addr_t(hw->buf_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) /* Start the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) hw->control & chan->xdev->max_buffer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) list_splice_tail_init(&chan->pending_list, &chan->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) chan->desc_pendingcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) chan->idle = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * xilinx_mcdma_start_transfer - Starts MCDMA transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) * @chan: Driver specific channel struct pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) struct xilinx_aximcdma_tx_segment *tail_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
	/*
	 * The channel lock is already held by the calling functions, so
	 * there is no need to take it again here.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (chan->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if (!chan->idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (list_empty(&chan->pending_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) head_desc = list_first_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) tail_desc = list_last_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) tail_segment = list_last_entry(&tail_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct xilinx_aximcdma_tx_segment, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
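	/*
	 * Per-channel programming order below: interrupt coalescing and IRQ
	 * enables in the channel control register, then the current
	 * descriptor, the channel-enable bit and RUNSTOP, and finally the
	 * tail descriptor write that starts the transfer.
	 */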
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) reg &= ~XILINX_MCDMA_COALESCE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) reg |= chan->desc_pendingcount <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) XILINX_MCDMA_COALESCE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) reg |= XILINX_MCDMA_IRQ_ALL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /* Program current descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) head_desc->async_tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /* Program channel enable register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) reg |= BIT(chan->tdest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) /* Start the fetch of BDs for the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) xilinx_dma_start(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) if (chan->err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) /* Start the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) tail_segment->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) list_splice_tail_init(&chan->pending_list, &chan->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) chan->desc_pendingcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) chan->idle = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) * xilinx_dma_issue_pending - Issue pending transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static void xilinx_dma_issue_pending(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) chan->start_transfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) }
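
/*
 * Note: a typical dmaengine client reaches this path through the generic
 * slave API. A minimal client-side sketch (illustrative only, not part of
 * this driver; the callback name is hypothetical):
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done_callback;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * dmaengine_submit() only queues the descriptor (xilinx_dma_tx_submit()
 * below); nothing is transferred until dma_async_issue_pending() lands
 * here.
 */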
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * CONTEXT: hardirq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct xilinx_dma_tx_descriptor *desc, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) /* This function was invoked with lock held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (list_empty(&chan->active_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
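	/*
	 * Walk the active list: record each descriptor's residue and error
	 * state, complete its cookie (cyclic descriptors keep their cookie
	 * live) and move it to the done list so the tasklet can invoke the
	 * client callbacks.
	 */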
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) list_for_each_entry_safe(desc, next, &chan->active_list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (chan->has_sg && chan->xdev->dma_config->dmatype !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) XDMA_TYPE_VDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) desc->residue = xilinx_dma_get_residue(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) desc->residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) desc->err = chan->err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (!desc->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) dma_cookie_complete(&desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) list_add_tail(&desc->node, &chan->done_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * xilinx_dma_reset - Reset DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) /* Wait for the hardware to finish reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) !(tmp & XILINX_DMA_DMACR_RESET), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) XILINX_DMA_LOOP_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) chan->err = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) chan->idle = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) chan->desc_pendingcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) chan->desc_submitcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
	/* Reset the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) err = xilinx_dma_reset(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) /* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) XILINX_DMA_DMAXR_ALL_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) * @irq: IRQ number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * @data: Pointer to the Xilinx MCDMA channel structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) * Return: IRQ_HANDLED/IRQ_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct xilinx_dma_chan *chan = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (chan->direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
	/* Read the channel ID that raised the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) chan_sermask = dma_ctrl_read(chan, ser_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) chan_id = ffs(chan_sermask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (!chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768)
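	/*
	 * MM2S channels occupy the first half of xdev->chan[] and S2MM
	 * channels the second half, so DEV_TO_MEM interrupts start half way
	 * into the array. ffs() above returns a 1-based bit index, hence
	 * the "- 1" below.
	 */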
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) if (chan->direction == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) chan_offset = chan->xdev->dma_config->max_channels / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) chan_offset = chan_offset + (chan_id - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) chan = chan->xdev->chan[chan_offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /* Read the status and ack the interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) status & XILINX_MCDMA_IRQ_ALL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) (chan->tdest)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) (chan->tdest)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) chan->err = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /*
		 * The device is taking too long to complete the transfer for
		 * the responsiveness the user requires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) dev_dbg(chan->dev, "Inter-packet latency too long\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) spin_lock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) xilinx_dma_complete_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) chan->idle = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) chan->start_transfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) spin_unlock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) tasklet_schedule(&chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * xilinx_dma_irq_handler - DMA Interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * @irq: IRQ number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * @data: Pointer to the Xilinx DMA channel structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * Return: IRQ_HANDLED/IRQ_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) struct xilinx_dma_chan *chan = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) u32 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) /* Read the status and ack the interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (status & XILINX_DMA_DMASR_ERR_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * error is recoverable, ignore it. Otherwise flag the error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) *
		 * Only recoverable errors can be cleared in the DMASR
		 * register, so make sure not to write 1 to any other error
		 * bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (!chan->flush_on_fsync ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) dev_err(chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) "Channel %p has errors %x, cdr %x tdr %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) chan, errors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) chan->err = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) /*
		 * The device is taking too long to complete the transfer for
		 * the responsiveness the user requires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) dev_dbg(chan->dev, "Inter-packet latency too long\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) spin_lock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) xilinx_dma_complete_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) chan->idle = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) chan->start_transfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) spin_unlock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) tasklet_schedule(&chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
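
/*
 * Both interrupt handlers above do only the minimum in hard-IRQ context
 * (ack the status bits, book-keep completed descriptors, restart the
 * channel); the client callbacks themselves run later from the channel
 * tasklet scheduled at the end of each handler.
 */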
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /**
 * append_desc_queue - Queue a descriptor on the channel's pending list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * @chan: Driver specific dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * @desc: dma transaction descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) static void append_desc_queue(struct xilinx_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) struct xilinx_dma_tx_descriptor *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) struct xilinx_vdma_tx_segment *tail_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) struct xilinx_dma_tx_descriptor *tail_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct xilinx_axidma_tx_segment *axidma_tail_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct xilinx_cdma_tx_segment *cdma_tail_segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if (list_empty(&chan->pending_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) goto append;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * Add the hardware descriptor to the chain of hardware descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) * that already exists in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) tail_desc = list_last_entry(&chan->pending_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct xilinx_dma_tx_descriptor, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) tail_segment = list_last_entry(&tail_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct xilinx_vdma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) cdma_tail_segment = list_last_entry(&tail_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) struct xilinx_cdma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) axidma_tail_segment = list_last_entry(&tail_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) struct xilinx_axidma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) aximcdma_tail_segment =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) list_last_entry(&tail_desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) struct xilinx_aximcdma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * Add the software descriptor and all children to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) * of pending transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) append:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) list_add_tail(&desc->node, &chan->pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) chan->desc_pendingcount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
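	/*
	 * VDMA hardware only cycles through 'num_frms' frame buffers, so
	 * cap the pending count to keep the frame-store programming within
	 * the range the hardware can address.
	 */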
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) && unlikely(chan->desc_pendingcount > chan->num_frms)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) dev_dbg(chan->dev, "desc pendingcount is too high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) chan->desc_pendingcount = chan->num_frms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * xilinx_dma_tx_submit - Submit DMA transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) * @tx: Async transaction descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * Return: cookie value on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (chan->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) if (chan->err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) /*
		 * The channel is in an error state; try to reset it. If the
		 * reset fails, the channel is no longer functional and the
		 * system needs a hard reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) err = xilinx_dma_chan_reset(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) spin_lock_irqsave(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) cookie = dma_cookie_assign(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /* Put this transaction onto the tail of the pending queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) append_desc_queue(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (desc->cyclic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) chan->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) chan->terminating = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) spin_unlock_irqrestore(&chan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for an
 * interleaved DMA_SLAVE transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * @xt: Interleaved template pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * @flags: transfer ack flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * Return: Async transaction descriptor on success and NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) struct dma_interleaved_template *xt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) struct xilinx_vdma_tx_segment *segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) struct xilinx_vdma_desc_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (!is_slave_direction(xt->dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (!xt->numf || !xt->sgl[0].size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (xt->frame_size != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) /* Allocate a transaction descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) desc = xilinx_dma_alloc_tx_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) desc->async_tx.tx_submit = xilinx_dma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) async_tx_ack(&desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) /* Allocate the link descriptor from DMA pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) segment = xilinx_vdma_alloc_tx_segment(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (!segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /* Fill in the hardware descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) hw = &segment->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) hw->vsize = xt->numf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) hw->hsize = xt->sgl[0].size;
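	/*
	 * The FRMDLY_STRIDE word packs the line stride (ICG + line size, in
	 * bytes) into its STRIDE field and the configured frame delay into
	 * its FRMDLY field.
	 */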
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) hw->stride |= chan->config.frm_dly <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (xt->dir != DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (chan->ext_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) hw->buf_addr = lower_32_bits(xt->dst_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) hw->buf_addr_msb = upper_32_bits(xt->dst_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) hw->buf_addr = xt->dst_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (chan->ext_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) hw->buf_addr = lower_32_bits(xt->src_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) hw->buf_addr_msb = upper_32_bits(xt->src_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) hw->buf_addr = xt->src_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) /* Insert the segment into the descriptor segments list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) list_add_tail(&segment->node, &desc->segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) /* Link the last hardware descriptor with the first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) segment = list_first_entry(&desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct xilinx_vdma_tx_segment, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) desc->async_tx.phys = segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) return &desc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) * @dma_dst: destination address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * @dma_src: source address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) * @len: transfer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * @flags: transfer ack flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) * Return: Async transaction descriptor on success and NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) dma_addr_t dma_src, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) struct xilinx_cdma_tx_segment *segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct xilinx_cdma_desc_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) if (!len || len > chan->xdev->max_buffer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) desc = xilinx_dma_alloc_tx_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) desc->async_tx.tx_submit = xilinx_dma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /* Allocate the link descriptor from DMA pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) segment = xilinx_cdma_alloc_tx_segment(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (!segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) hw = &segment->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) hw->control = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) hw->src_addr = dma_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) hw->dest_addr = dma_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (chan->ext_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) hw->src_addr_msb = upper_32_bits(dma_src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) hw->dest_addr_msb = upper_32_bits(dma_dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) /* Insert the segment into the descriptor segments list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) list_add_tail(&segment->node, &desc->segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
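	/*
	 * With a single segment, the hardware next-descriptor pointer simply
	 * points back at the segment itself; append_desc_queue() re-links it
	 * if another descriptor is queued behind this one.
	 */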
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) desc->async_tx.phys = segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) hw->next_desc = segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) return &desc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
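
/*
 * Note: clients typically reach xilinx_cdma_prep_memcpy() through the
 * generic helper. A minimal client-side sketch (illustrative only,
 * assuming 'chan' is a CDMA channel obtained with dma_request_chan() and
 * that src/dst are DMA-mapped addresses):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */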
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * @direction: DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) * @flags: transfer ack flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) * @context: APP words of the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * Return: Async transaction descriptor on success and NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) enum dma_transfer_direction direction, unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct xilinx_axidma_tx_segment *segment = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) u32 *app_w = (u32 *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) size_t sg_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (!is_slave_direction(direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) /* Allocate a transaction descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) desc = xilinx_dma_alloc_tx_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) desc->async_tx.tx_submit = xilinx_dma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) /* Build transactions using information in the scatter gather list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) sg_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) /* Loop until the entire scatterlist entry is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) while (sg_used < sg_dma_len(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) struct xilinx_axidma_desc_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) /* Get a free segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) segment = xilinx_axidma_alloc_tx_segment(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (!segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) * Calculate the maximum number of bytes to transfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) * making sure it is less than the hw limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) sg_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) hw = &segment->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) /* Fill in the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) sg_used, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) hw->control = copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (chan->direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (app_w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) memcpy(hw->app, app_w, sizeof(u32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) XILINX_DMA_NUM_APP_WORDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) sg_used += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) * Insert the segment into the descriptor segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) list_add_tail(&segment->node, &desc->segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
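
	/*
	 * A scatterlist entry longer than the hardware maximum transfer
	 * length is split into several segments by the loop above, each
	 * bounded by xilinx_dma_calc_copysize().
	 */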
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) segment = list_first_entry(&desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) struct xilinx_axidma_tx_segment, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) desc->async_tx.phys = segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
	/*
	 * For DMA_MEM_TO_DEV transfers, set SOP on the first segment and
	 * EOP on the last one.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) if (chan->direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) segment->hw.control |= XILINX_DMA_BD_SOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) segment = list_last_entry(&desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) struct xilinx_axidma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) segment->hw.control |= XILINX_DMA_BD_EOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return &desc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) /**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) * @buf_addr: Physical address of the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) * @buf_len: Total length of the cyclic buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * @period_len: length of individual cyclic buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) * @direction: DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * @flags: transfer ack flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * Return: Async transaction descriptor on success and NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) size_t period_len, enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) size_t copy, sg_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) unsigned int num_periods;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (!period_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) num_periods = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (!num_periods)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) if (!is_slave_direction(direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /* Allocate a transaction descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) desc = xilinx_dma_alloc_tx_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) chan->direction = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) desc->async_tx.tx_submit = xilinx_dma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) for (i = 0; i < num_periods; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) sg_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) while (sg_used < period_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) struct xilinx_axidma_desc_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) /* Get a free segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) segment = xilinx_axidma_alloc_tx_segment(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (!segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) * Calculate the maximum number of bytes to transfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) * making sure it is less than the hw limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) copy = xilinx_dma_calc_copysize(chan, period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) sg_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) hw = &segment->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) period_len * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) hw->control = copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) prev->hw.next_desc = segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) prev = segment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) sg_used += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * Insert the segment into the descriptor segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) list_add_tail(&segment->node, &desc->segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) head_segment = list_first_entry(&desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct xilinx_axidma_tx_segment, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) desc->async_tx.phys = head_segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) desc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
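	/*
	 * Close the ring: link the last segment back to the first so the
	 * engine keeps cycling through the periods until the channel is
	 * terminated.
	 */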
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) segment = list_last_entry(&desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) struct xilinx_axidma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) segment->hw.next_desc = (u32) head_segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
	/*
	 * For DMA_MEM_TO_DEV transfers, set SOP on the first segment and
	 * EOP on the last one.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) head_segment->hw.control |= XILINX_DMA_BD_SOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) segment->hw.control |= XILINX_DMA_BD_EOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return &desc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * @direction: DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * @flags: transfer ack flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) * @context: APP words of the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) * Return: Async transaction descriptor on success and NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) struct xilinx_dma_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) struct xilinx_aximcdma_tx_segment *segment = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) u32 *app_w = (u32 *)context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) size_t sg_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) if (!is_slave_direction(direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) /* Allocate a transaction descriptor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) desc = xilinx_dma_alloc_tx_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) desc->async_tx.tx_submit = xilinx_dma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) /* Build transactions using information in the scatter gather list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) sg_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) /* Loop until the entire scatterlist entry is used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) while (sg_used < sg_dma_len(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct xilinx_aximcdma_desc_hw *hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) /* Get a free segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) segment = xilinx_aximcdma_alloc_tx_segment(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (!segment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * Calculate the maximum number of bytes to transfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) * making sure it is less than the hw limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) copy = min_t(size_t, sg_dma_len(sg) - sg_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) chan->xdev->max_buffer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) hw = &segment->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) /* Fill in the descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) sg_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) hw->control = copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (chan->direction == DMA_MEM_TO_DEV && app_w) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) memcpy(hw->app, app_w, sizeof(u32) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) XILINX_DMA_NUM_APP_WORDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) sg_used += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * Insert the segment into the descriptor segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) list_add_tail(&segment->node, &desc->segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) segment = list_first_entry(&desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) struct xilinx_aximcdma_tx_segment, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) desc->async_tx.phys = segment->phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
	/* For MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (chan->direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) segment->hw.control |= XILINX_MCDMA_BD_SOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) segment = list_last_entry(&desc->segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) struct xilinx_aximcdma_tx_segment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) segment->hw.control |= XILINX_MCDMA_BD_EOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) return &desc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) xilinx_dma_free_tx_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
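
/*
 * Note: clients reach this callback through the generic dmaengine API; a
 * minimal sketch (channel acquisition and error handling omitted, local
 * variable names hypothetical):
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */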
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) * xilinx_dma_terminate_all - Halt the channel and free descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) * @dchan: Driver specific DMA Channel pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) * Return: '0' always.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) static int xilinx_dma_terminate_all(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (!chan->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) err = chan->stop_transfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) dev_err(chan->dev, "Cannot stop channel %p: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) chan, dma_ctrl_read(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) XILINX_DMA_REG_DMASR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) chan->err = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) xilinx_dma_chan_reset(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) /* Remove and free all of the descriptors in the lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) chan->terminating = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) xilinx_dma_free_descriptors(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) chan->idle = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) if (chan->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) chan->cyclic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
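	/* CDMA only: drop back to simple (register direct) mode */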
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) XILINX_CDMA_CR_SGMODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) /**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * . halt the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * . configure interrupt coalescing and inter-packet delay threshold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) * . start/stop parking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) * . enable genlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) * @dchan: DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * @cfg: VDMA device configuration pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) struct xilinx_vdma_config *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) u32 dmacr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (cfg->reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) return xilinx_dma_chan_reset(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) chan->config.frm_dly = cfg->frm_dly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) chan->config.park = cfg->park;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) /* genlock settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) chan->config.gen_lock = cfg->gen_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) chan->config.master = cfg->master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (cfg->gen_lock && chan->genlock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) chan->config.frm_cnt_en = cfg->frm_cnt_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) chan->config.vflip_en = cfg->vflip_en;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (cfg->park)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) chan->config.park_frm = cfg->park_frm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) chan->config.park_frm = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) chan->config.coalesc = cfg->coalesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) chan->config.delay = cfg->delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) chan->config.coalesc = cfg->coalesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) chan->config.delay = cfg->delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) /* FSync Source selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
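
/*
 * Example (illustrative only): a client that has obtained a VDMA channel can
 * tune it at run time before issuing transfers. Field values below are
 * placeholders, not recommended settings, and the variable names are
 * hypothetical:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,
 *		.park = 0,
 *	};
 *
 *	ret = xilinx_vdma_channel_set_config(dchan, &cfg);
 */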
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* -----------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) * Probe and remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) * xilinx_dma_chan_remove - Per Channel remove function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) * @chan: Driver specific DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) /* Disable all interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) XILINX_DMA_DMAXR_ALL_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) if (chan->irq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) free_irq(chan->irq, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) tasklet_kill(&chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) list_del(&chan->common.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) struct clk **tx_clk, struct clk **rx_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) struct clk **sg_clk, struct clk **tmp_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) *tmp_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) if (IS_ERR(*axi_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
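	/* The remaining clocks are optional; missing ones stay NULL */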
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (IS_ERR(*tx_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) *tx_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (IS_ERR(*rx_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) *rx_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (IS_ERR(*sg_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) *sg_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) err = clk_prepare_enable(*axi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) err = clk_prepare_enable(*tx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) goto err_disable_axiclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) err = clk_prepare_enable(*rx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) goto err_disable_txclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) err = clk_prepare_enable(*sg_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) goto err_disable_rxclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) err_disable_rxclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) clk_disable_unprepare(*rx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) err_disable_txclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) clk_disable_unprepare(*tx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) err_disable_axiclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) clk_disable_unprepare(*axi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) struct clk **dev_clk, struct clk **tmp_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) struct clk **tmp1_clk, struct clk **tmp2_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) *tmp_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) *tmp1_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) *tmp2_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (IS_ERR(*axi_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) if (IS_ERR(*dev_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) err = clk_prepare_enable(*axi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) err = clk_prepare_enable(*dev_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) goto err_disable_axiclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) err_disable_axiclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) clk_disable_unprepare(*axi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) struct clk **tx_clk, struct clk **txs_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) struct clk **rx_clk, struct clk **rxs_clk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (IS_ERR(*axi_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (IS_ERR(*tx_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) *tx_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (IS_ERR(*txs_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) *txs_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) if (IS_ERR(*rx_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) *rx_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (IS_ERR(*rxs_clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) *rxs_clk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) err = clk_prepare_enable(*axi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) err = clk_prepare_enable(*tx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) goto err_disable_axiclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) err = clk_prepare_enable(*txs_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) goto err_disable_txclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) err = clk_prepare_enable(*rx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) goto err_disable_txsclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) err = clk_prepare_enable(*rxs_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) goto err_disable_rxclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) err_disable_rxclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) clk_disable_unprepare(*rx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) err_disable_txsclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) clk_disable_unprepare(*txs_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) err_disable_txclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) clk_disable_unprepare(*tx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) err_disable_axiclk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) clk_disable_unprepare(*axi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) clk_disable_unprepare(xdev->rxs_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) clk_disable_unprepare(xdev->rx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) clk_disable_unprepare(xdev->txs_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) clk_disable_unprepare(xdev->tx_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) clk_disable_unprepare(xdev->axi_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) * xilinx_dma_chan_probe - Per Channel Probing
 * It gets the channel features from the device tree entry and
 * initializes the special channel handling routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) * @xdev: Driver specific device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) * @node: Device node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) struct xilinx_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) bool has_dre = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) u32 value, width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) /* Allocate and initialize the channel structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) chan->dev = xdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) chan->xdev = xdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) chan->desc_pendingcount = 0x0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) chan->ext_addr = xdev->ext_addr;
	/* This variable ensures that descriptors are not
	 * submitted while the DMA engine is in progress. It avoids
	 * polling for a bit in the status register to know the DMA
	 * state in the driver hot path.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) chan->idle = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) spin_lock_init(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) INIT_LIST_HEAD(&chan->pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) INIT_LIST_HEAD(&chan->done_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) INIT_LIST_HEAD(&chan->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) INIT_LIST_HEAD(&chan->free_seg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) /* Retrieve the channel properties from the device tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) has_dre = of_property_read_bool(node, "xlnx,include-dre");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) err = of_property_read_u32(node, "xlnx,datawidth", &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) dev_err(xdev->dev, "missing xlnx,datawidth property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) width = value >> 3; /* Convert bits to bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) /* If data width is greater than 8 bytes, DRE is not in hw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (width > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) has_dre = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
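	/*
	 * Without DRE the hardware only handles buffers aligned to the stream
	 * data width, so advertise that alignment to the dmaengine core.
	 */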
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) if (!has_dre)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) chan->direction = DMA_MEM_TO_DEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) chan->id = xdev->mm2s_chan_id++;
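		/* MM2S channel IDs start at 0, so TDEST equals the channel ID */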
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) chan->tdest = chan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) chan->config.park = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) chan->flush_on_fsync = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) } else if (of_device_is_compatible(node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) "xlnx,axi-vdma-s2mm-channel") ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) of_device_is_compatible(node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) "xlnx,axi-dma-s2mm-channel")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) chan->direction = DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) chan->id = xdev->s2mm_chan_id++;
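		/*
		 * S2MM channel IDs start at max_channels / 2, so TDEST is the
		 * channel's index within the S2MM half.
		 */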
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) chan->has_vflip = of_property_read_bool(node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) "xlnx,enable-vert-flip");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) if (chan->has_vflip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) chan->config.vflip_en = dma_read(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) XILINX_VDMA_ENABLE_VERTICAL_FLIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) chan->config.park = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) chan->flush_on_fsync = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) dev_err(xdev->dev, "Invalid channel compatible node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) /* Request the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) chan->irq = irq_of_parse_and_map(node, chan->tdest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) err = request_irq(chan->irq, xdev->dma_config->irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) IRQF_SHARED, "xilinx-dma-controller", chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) chan->start_transfer = xilinx_dma_start_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) chan->stop_transfer = xilinx_dma_stop_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) chan->start_transfer = xilinx_mcdma_start_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) chan->stop_transfer = xilinx_dma_stop_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) chan->start_transfer = xilinx_cdma_start_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) chan->stop_transfer = xilinx_cdma_stop_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) chan->start_transfer = xilinx_vdma_start_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) chan->stop_transfer = xilinx_dma_stop_transfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
	/*
	 * Check if SG is enabled: AXI MCDMA always uses SG, while AXI DMA and
	 * CDMA report it via the SG bit in the status register. VDMA does not
	 * use SG at all.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) XILINX_DMA_DMASR_SG_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) chan->has_sg = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) chan->has_sg ? "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) /* Initialize the tasklet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) * Initialize the DMA channel and add it to the DMA engine channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) chan->common.device = &xdev->common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) list_add_tail(&chan->common.device_node, &xdev->common.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) xdev->chan[chan->id] = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /* Reset the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) err = xilinx_dma_chan_reset(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) dev_err(xdev->dev, "Reset channel failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) * @xdev: Driver specific device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) * @node: Device node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) * Return: 0 always.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) struct device_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) u32 nr_channels = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) ret = of_property_read_u32(node, "dma-channels", &nr_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) dev_warn(xdev->dev, "missing dma-channels property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) for (i = 0; i < nr_channels; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) xilinx_dma_chan_probe(xdev, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) * of_dma_xilinx_xlate - Translation function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) * @dma_spec: Pointer to DMA specifier as found in the device tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) * @ofdma: Pointer to DMA controller data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) * Return: DMA channel pointer on success and NULL on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) struct xilinx_dma_device *xdev = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) int chan_id = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) return dma_get_slave_channel(&xdev->chan[chan_id]->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) static const struct xilinx_dma_config axidma_config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) .dmatype = XDMA_TYPE_AXIDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) .clk_init = axidma_clk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) .irq_handler = xilinx_dma_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) static const struct xilinx_dma_config aximcdma_config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) .dmatype = XDMA_TYPE_AXIMCDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) .clk_init = axidma_clk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) .irq_handler = xilinx_mcdma_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) static const struct xilinx_dma_config axicdma_config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) .dmatype = XDMA_TYPE_CDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) .clk_init = axicdma_clk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) .irq_handler = xilinx_dma_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) static const struct xilinx_dma_config axivdma_config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) .dmatype = XDMA_TYPE_VDMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) .clk_init = axivdma_clk_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) .irq_handler = xilinx_dma_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) static const struct of_device_id xilinx_dma_of_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) * xilinx_dma_probe - Driver probe function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) * @pdev: Pointer to the platform_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) * Return: '0' on success and failure value on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) static int xilinx_dma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) struct clk **, struct clk **, struct clk **)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) = axivdma_clk_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) struct device_node *node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) struct xilinx_dma_device *xdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) struct device_node *child, *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) u32 num_frames, addr_width, len_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) /* Allocate and initialize the DMA engine structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) if (!xdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) xdev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) if (np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) match = of_match_node(xilinx_dma_of_ids, np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (match && match->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) xdev->dma_config = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) clk_init = xdev->dma_config->clk_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) &xdev->rx_clk, &xdev->rxs_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) /* Request and map I/O memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) xdev->regs = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) if (IS_ERR(xdev->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) return PTR_ERR(xdev->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) /* Retrieve the DMA engine properties from the device tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
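	/*
	 * MM2S channels get IDs in [0, max_channels / 2); S2MM channels get
	 * the upper half of the ID space.
	 */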
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (!of_property_read_u32(node, "xlnx,sg-length-width",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) &len_width)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) dev_warn(xdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) "invalid xlnx,sg-length-width property value. Using default width\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) xdev->max_buffer_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) GENMASK(len_width - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) err = of_property_read_u32(node, "xlnx,num-fstores",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) &num_frames);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) dev_err(xdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) "missing xlnx,num-fstores property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) err = of_property_read_u32(node, "xlnx,flush-fsync",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) &xdev->flush_on_fsync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) dev_warn(xdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) "missing xlnx,flush-fsync property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) if (addr_width > 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) xdev->ext_addr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) xdev->ext_addr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) /* Set the dma mask bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) /* Initialize the DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) xdev->common.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) INIT_LIST_HEAD(&xdev->common.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) xdev->common.device_alloc_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) xilinx_dma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) xdev->common.device_free_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) xilinx_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) xdev->common.device_terminate_all = xilinx_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) xdev->common.device_tx_status = xilinx_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) xdev->common.device_issue_pending = xilinx_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) xdev->common.device_prep_dma_cyclic =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) xilinx_dma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) /* Residue calculation is supported by only AXI DMA and CDMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) xdev->common.residue_granularity =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) DMA_RESIDUE_GRANULARITY_SEGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) /* Residue calculation is supported only by AXI DMA and CDMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) xdev->common.residue_granularity =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) DMA_RESIDUE_GRANULARITY_SEGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) xdev->common.device_prep_interleaved_dma =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) xilinx_vdma_dma_prep_interleaved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) platform_set_drvdata(pdev, xdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) /* Initialize the channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) for_each_child_of_node(node, child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) err = xilinx_dma_child_probe(xdev, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) goto disable_clks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
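/* Only the VDMA variant uses frame stores; propagate the frame count to each channel */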
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) for (i = 0; i < xdev->dma_config->max_channels; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) if (xdev->chan[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) xdev->chan[i]->num_frms = num_frames;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) /* Register the DMA engine with the core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) err = dma_async_device_register(&xdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) dev_err(xdev->dev, "failed to register the dma device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
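/* Register the OF translation so client "dmas" phandles resolve to our channels */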
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) err = of_dma_controller_register(node, of_dma_xilinx_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) xdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) dev_err(&pdev->dev, "Unable to register DMA to DT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) dma_async_device_unregister(&xdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) disable_clks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) xdma_disable_allclks(xdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) for (i = 0; i < xdev->dma_config->max_channels; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) if (xdev->chan[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) xilinx_dma_chan_remove(xdev->chan[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) }
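
/*
 * Illustrative sketch (editorial addition, not part of the upstream driver):
 * a minimal example of how a client driver might obtain one of the channels
 * registered above and run a single device-to-memory transfer through the
 * standard dmaengine API. The "rx" channel name, the transfer direction and
 * the buffer parameters are assumptions for illustration only.
 */
static int __maybe_unused xilinx_dma_example_xfer(struct device *dev,
						  dma_addr_t buf, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret = 0;

	/* Resolve the channel via the client's "dmas"/"dma-names" properties */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Prepare a single device-to-memory (S2MM) transfer */
	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -ENOMEM;
		goto out;
	}

	/* Queue the descriptor on the channel's pending list */
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	/* Start the queued descriptor and wait for it to complete */
	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -ETIMEDOUT;
out:
	dma_release_channel(chan);
	return ret;
}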
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) * xilinx_dma_remove - Driver remove function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) * @pdev: Pointer to the platform_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) * Return: Always '0'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) static int xilinx_dma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) dma_async_device_unregister(&xdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) for (i = 0; i < xdev->dma_config->max_channels; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (xdev->chan[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) xilinx_dma_chan_remove(xdev->chan[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) xdma_disable_allclks(xdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) static struct platform_driver xilinx_vdma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) .name = "xilinx-vdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) .of_match_table = xilinx_dma_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) .probe = xilinx_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) .remove = xilinx_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) module_platform_driver(xilinx_vdma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) MODULE_AUTHOR("Xilinx, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) MODULE_DESCRIPTION("Xilinx VDMA driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) MODULE_LICENSE("GPL v2");