// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * Driver for STM32 MDMA controller
 *
 * Inspired by stm32-dma.c and dma-jz4780.c
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "virt-dma.h"

/* MDMA Generic getter/setter */
#define STM32_MDMA_SHIFT(n)		(ffs(n) - 1)
#define STM32_MDMA_SET(n, mask)		(((n) << STM32_MDMA_SHIFT(mask)) & \
					 (mask))
#define STM32_MDMA_GET(n, mask)		(((n) & (mask)) >> \
					 STM32_MDMA_SHIFT(mask))
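
/*
 * Illustrative example (not from the original source): with
 * STM32_MDMA_CCR_PL_MASK = GENMASK(7, 6), STM32_MDMA_SHIFT() yields 6, so
 * STM32_MDMA_SET(2, STM32_MDMA_CCR_PL_MASK) = (2 << 6) & 0xc0 = 0x80, and
 * STM32_MDMA_GET(0x80, STM32_MDMA_CCR_PL_MASK) recovers 2.
 */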

#define STM32_MDMA_GISR0		0x0000 /* MDMA Int Status Reg 0 */
#define STM32_MDMA_GISR1		0x0004 /* MDMA Int Status Reg 1 */

/* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x)		(0x40 + 0x40 * (x)) /* x = 0..62 */
#define STM32_MDMA_CISR_CRQA		BIT(16)
#define STM32_MDMA_CISR_TCIF		BIT(4)
#define STM32_MDMA_CISR_BTIF		BIT(3)
#define STM32_MDMA_CISR_BRTIF		BIT(2)
#define STM32_MDMA_CISR_CTCIF		BIT(1)
#define STM32_MDMA_CISR_TEIF		BIT(0)

/* MDMA Channel x interrupt flag clear register */
#define STM32_MDMA_CIFCR(x)		(0x44 + 0x40 * (x))
#define STM32_MDMA_CIFCR_CLTCIF		BIT(4)
#define STM32_MDMA_CIFCR_CBTIF		BIT(3)
#define STM32_MDMA_CIFCR_CBRTIF		BIT(2)
#define STM32_MDMA_CIFCR_CCTCIF		BIT(1)
#define STM32_MDMA_CIFCR_CTEIF		BIT(0)
#define STM32_MDMA_CIFCR_CLEAR_ALL	(STM32_MDMA_CIFCR_CLTCIF \
					 | STM32_MDMA_CIFCR_CBTIF \
					 | STM32_MDMA_CIFCR_CBRTIF \
					 | STM32_MDMA_CIFCR_CCTCIF \
					 | STM32_MDMA_CIFCR_CTEIF)

/* MDMA Channel x error status register */
#define STM32_MDMA_CESR(x)		(0x48 + 0x40 * (x))
#define STM32_MDMA_CESR_BSE		BIT(11)
#define STM32_MDMA_CESR_ASR		BIT(10)
#define STM32_MDMA_CESR_TEMD		BIT(9)
#define STM32_MDMA_CESR_TELD		BIT(8)
#define STM32_MDMA_CESR_TED		BIT(7)
#define STM32_MDMA_CESR_TEA_MASK	GENMASK(6, 0)

/* MDMA Channel x control register */
#define STM32_MDMA_CCR(x)		(0x4C + 0x40 * (x))
#define STM32_MDMA_CCR_SWRQ		BIT(16)
#define STM32_MDMA_CCR_WEX		BIT(14)
#define STM32_MDMA_CCR_HEX		BIT(13)
#define STM32_MDMA_CCR_BEX		BIT(12)
#define STM32_MDMA_CCR_PL_MASK		GENMASK(7, 6)
#define STM32_MDMA_CCR_PL(n)		STM32_MDMA_SET(n, \
						       STM32_MDMA_CCR_PL_MASK)
#define STM32_MDMA_CCR_TCIE		BIT(5)
#define STM32_MDMA_CCR_BTIE		BIT(4)
#define STM32_MDMA_CCR_BRTIE		BIT(3)
#define STM32_MDMA_CCR_CTCIE		BIT(2)
#define STM32_MDMA_CCR_TEIE		BIT(1)
#define STM32_MDMA_CCR_EN		BIT(0)
#define STM32_MDMA_CCR_IRQ_MASK		(STM32_MDMA_CCR_TCIE \
					 | STM32_MDMA_CCR_BTIE \
					 | STM32_MDMA_CCR_BRTIE \
					 | STM32_MDMA_CCR_CTCIE \
					 | STM32_MDMA_CCR_TEIE)

/* MDMA Channel x transfer configuration register */
#define STM32_MDMA_CTCR(x)		(0x50 + 0x40 * (x))
#define STM32_MDMA_CTCR_BWM		BIT(31)
#define STM32_MDMA_CTCR_SWRM		BIT(30)
#define STM32_MDMA_CTCR_TRGM_MSK	GENMASK(29, 28)
#define STM32_MDMA_CTCR_TRGM(n)		STM32_MDMA_SET((n), \
						       STM32_MDMA_CTCR_TRGM_MSK)
#define STM32_MDMA_CTCR_TRGM_GET(n)	STM32_MDMA_GET((n), \
						       STM32_MDMA_CTCR_TRGM_MSK)
#define STM32_MDMA_CTCR_PAM_MASK	GENMASK(27, 26)
#define STM32_MDMA_CTCR_PAM(n)		STM32_MDMA_SET(n, \
						       STM32_MDMA_CTCR_PAM_MASK)
#define STM32_MDMA_CTCR_PKE		BIT(25)
#define STM32_MDMA_CTCR_TLEN_MSK	GENMASK(24, 18)
#define STM32_MDMA_CTCR_TLEN(n)		STM32_MDMA_SET((n), \
						       STM32_MDMA_CTCR_TLEN_MSK)
#define STM32_MDMA_CTCR_TLEN_GET(n)	STM32_MDMA_GET((n), \
						       STM32_MDMA_CTCR_TLEN_MSK)
#define STM32_MDMA_CTCR_LEN2_MSK	GENMASK(25, 18)
#define STM32_MDMA_CTCR_LEN2(n)		STM32_MDMA_SET((n), \
						       STM32_MDMA_CTCR_LEN2_MSK)
#define STM32_MDMA_CTCR_LEN2_GET(n)	STM32_MDMA_GET((n), \
						       STM32_MDMA_CTCR_LEN2_MSK)
#define STM32_MDMA_CTCR_DBURST_MASK	GENMASK(17, 15)
#define STM32_MDMA_CTCR_DBURST(n)	STM32_MDMA_SET(n, \
						       STM32_MDMA_CTCR_DBURST_MASK)
#define STM32_MDMA_CTCR_SBURST_MASK	GENMASK(14, 12)
#define STM32_MDMA_CTCR_SBURST(n)	STM32_MDMA_SET(n, \
						       STM32_MDMA_CTCR_SBURST_MASK)
#define STM32_MDMA_CTCR_DINCOS_MASK	GENMASK(11, 10)
#define STM32_MDMA_CTCR_DINCOS(n)	STM32_MDMA_SET((n), \
						       STM32_MDMA_CTCR_DINCOS_MASK)
#define STM32_MDMA_CTCR_SINCOS_MASK	GENMASK(9, 8)
#define STM32_MDMA_CTCR_SINCOS(n)	STM32_MDMA_SET((n), \
						       STM32_MDMA_CTCR_SINCOS_MASK)
#define STM32_MDMA_CTCR_DSIZE_MASK	GENMASK(7, 6)
#define STM32_MDMA_CTCR_DSIZE(n)	STM32_MDMA_SET(n, \
						       STM32_MDMA_CTCR_DSIZE_MASK)
#define STM32_MDMA_CTCR_SSIZE_MASK	GENMASK(5, 4)
#define STM32_MDMA_CTCR_SSIZE(n)	STM32_MDMA_SET(n, \
						       STM32_MDMA_CTCR_SSIZE_MASK)
#define STM32_MDMA_CTCR_DINC_MASK	GENMASK(3, 2)
#define STM32_MDMA_CTCR_DINC(n)		STM32_MDMA_SET((n), \
						       STM32_MDMA_CTCR_DINC_MASK)
#define STM32_MDMA_CTCR_SINC_MASK	GENMASK(1, 0)
#define STM32_MDMA_CTCR_SINC(n)		STM32_MDMA_SET((n), \
						       STM32_MDMA_CTCR_SINC_MASK)
#define STM32_MDMA_CTCR_CFG_MASK	(STM32_MDMA_CTCR_SINC_MASK \
					 | STM32_MDMA_CTCR_DINC_MASK \
					 | STM32_MDMA_CTCR_SINCOS_MASK \
					 | STM32_MDMA_CTCR_DINCOS_MASK \
					 | STM32_MDMA_CTCR_LEN2_MSK \
					 | STM32_MDMA_CTCR_TRGM_MSK)

/* MDMA Channel x block number of data register */
#define STM32_MDMA_CBNDTR(x)		(0x54 + 0x40 * (x))
#define STM32_MDMA_CBNDTR_BRC_MK	GENMASK(31, 20)
#define STM32_MDMA_CBNDTR_BRC(n)	STM32_MDMA_SET(n, \
						       STM32_MDMA_CBNDTR_BRC_MK)
#define STM32_MDMA_CBNDTR_BRC_GET(n)	STM32_MDMA_GET((n), \
						       STM32_MDMA_CBNDTR_BRC_MK)

#define STM32_MDMA_CBNDTR_BRDUM		BIT(19)
#define STM32_MDMA_CBNDTR_BRSUM		BIT(18)
#define STM32_MDMA_CBNDTR_BNDT_MASK	GENMASK(16, 0)
#define STM32_MDMA_CBNDTR_BNDT(n)	STM32_MDMA_SET(n, \
						       STM32_MDMA_CBNDTR_BNDT_MASK)

/* MDMA Channel x source address register */
#define STM32_MDMA_CSAR(x)		(0x58 + 0x40 * (x))

/* MDMA Channel x destination address register */
#define STM32_MDMA_CDAR(x)		(0x5C + 0x40 * (x))

/* MDMA Channel x block repeat address update register */
#define STM32_MDMA_CBRUR(x)		(0x60 + 0x40 * (x))
#define STM32_MDMA_CBRUR_DUV_MASK	GENMASK(31, 16)
#define STM32_MDMA_CBRUR_DUV(n)		STM32_MDMA_SET(n, \
						       STM32_MDMA_CBRUR_DUV_MASK)
#define STM32_MDMA_CBRUR_SUV_MASK	GENMASK(15, 0)
#define STM32_MDMA_CBRUR_SUV(n)		STM32_MDMA_SET(n, \
						       STM32_MDMA_CBRUR_SUV_MASK)

/* MDMA Channel x link address register */
#define STM32_MDMA_CLAR(x)		(0x64 + 0x40 * (x))

/* MDMA Channel x trigger and bus selection register */
#define STM32_MDMA_CTBR(x)		(0x68 + 0x40 * (x))
#define STM32_MDMA_CTBR_DBUS		BIT(17)
#define STM32_MDMA_CTBR_SBUS		BIT(16)
#define STM32_MDMA_CTBR_TSEL_MASK	GENMASK(5, 0)
#define STM32_MDMA_CTBR_TSEL(n)		STM32_MDMA_SET(n, \
						       STM32_MDMA_CTBR_TSEL_MASK)

/* MDMA Channel x mask address register */
#define STM32_MDMA_CMAR(x)		(0x70 + 0x40 * (x))

/* MDMA Channel x mask data register */
#define STM32_MDMA_CMDR(x)		(0x74 + 0x40 * (x))

#define STM32_MDMA_MAX_BUF_LEN		128
#define STM32_MDMA_MAX_BLOCK_LEN	65536
#define STM32_MDMA_MAX_CHANNELS		63
#define STM32_MDMA_MAX_REQUESTS		256
#define STM32_MDMA_MAX_BURST		128
#define STM32_MDMA_VERY_HIGH_PRIORITY	0x11

enum stm32_mdma_trigger_mode {
	STM32_MDMA_BUFFER,
	STM32_MDMA_BLOCK,
	STM32_MDMA_BLOCK_REP,
	STM32_MDMA_LINKED_LIST,
};

enum stm32_mdma_width {
	STM32_MDMA_BYTE,
	STM32_MDMA_HALF_WORD,
	STM32_MDMA_WORD,
	STM32_MDMA_DOUBLE_WORD,
};

enum stm32_mdma_inc_mode {
	STM32_MDMA_FIXED = 0,
	STM32_MDMA_INC = 2,
	STM32_MDMA_DEC = 3,
};
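
/*
 * Note (added for clarity): the increment-mode values match the 2-bit
 * SINC/DINC field encodings: 0b00 keeps the pointer fixed, 0b10 increments
 * it and 0b11 decrements it, which is why the value 1 is skipped.
 */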

struct stm32_mdma_chan_config {
	u32 request;
	u32 priority_level;
	u32 transfer_config;
	u32 mask_addr;
	u32 mask_data;
};

struct stm32_mdma_hwdesc {
	u32 ctcr;
	u32 cbndtr;
	u32 csar;
	u32 cdar;
	u32 cbrur;
	u32 clar;
	u32 ctbr;
	u32 dummy;
	u32 cmar;
	u32 cmdr;
} __aligned(64);
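
/*
 * Note (added for clarity): the hardware descriptor mirrors the per-channel
 * register block from CTCR (0x50) through CMDR (0x74); 'dummy' stands in for
 * the reserved word between CTBR and CMAR. The 64-byte alignment presumably
 * matches the hardware constraint on link addresses programmed into CLAR.
 */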

struct stm32_mdma_desc_node {
	struct stm32_mdma_hwdesc *hwdesc;
	dma_addr_t hwdesc_phys;
};

struct stm32_mdma_desc {
	struct virt_dma_desc vdesc;
	u32 ccr;
	bool cyclic;
	u32 count;
	struct stm32_mdma_desc_node node[];
};

struct stm32_mdma_chan {
	struct virt_dma_chan vchan;
	struct dma_pool *desc_pool;
	u32 id;
	struct stm32_mdma_desc *desc;
	u32 curr_hwdesc;
	struct dma_slave_config dma_config;
	struct stm32_mdma_chan_config chan_config;
	bool busy;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_mdma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	int irq;
	u32 nr_channels;
	u32 nr_requests;
	u32 nr_ahb_addr_masks;
	struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
	u32 ahb_addr_masks[];
};

static struct stm32_mdma_device *stm32_mdma_get_dev(
	struct stm32_mdma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
			    ddev);
}

static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_mdma_chan, vchan.chan);
}

static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_mdma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_mdma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
{
	return mdma_dev->ddev.dev;
}

static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
				u32 mask)
{
	void __iomem *addr = dmadev->base + reg;

	writel_relaxed(readl_relaxed(addr) | mask, addr);
}

static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
				u32 mask)
{
	void __iomem *addr = dmadev->base + reg;

	writel_relaxed(readl_relaxed(addr) & ~mask, addr);
}
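
/*
 * Illustrative usage (not from the original source): enabling channel 'id'
 * amounts to a read-modify-write of its control register, e.g.
 * stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN).
 */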

static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
		struct stm32_mdma_chan *chan, u32 count)
{
	struct stm32_mdma_desc *desc;
	int i;

	desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < count; i++) {
		desc->node[i].hwdesc =
			dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
				       &desc->node[i].hwdesc_phys);
		if (!desc->node[i].hwdesc)
			goto err;
	}

	desc->count = count;

	return desc;

err:
	dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
	while (--i >= 0)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
	return NULL;
}

static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
{
	struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
	int i;

	for (i = 0; i < desc->count; i++)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
}

static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
				enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(width) - 1;
	default:
		dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
			width);
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
							u32 buf_len, u32 tlen)
{
	enum dma_slave_buswidth max_width;

	for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
	     max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
	     max_width >>= 1) {
		/*
		 * Address and buffer length both have to be aligned on
		 * bus width
		 */
		if ((((buf_len | addr) & (max_width - 1)) == 0) &&
		    tlen >= max_width)
			break;
	}

	return max_width;
}
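
/*
 * Illustrative example (not from the original source): for addr = 0x24000004,
 * buf_len = 12 and tlen = 128, 8-byte accesses fail the alignment test, so
 * the helper settles on DMA_SLAVE_BUSWIDTH_4_BYTES.
 */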

static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
				     enum dma_slave_buswidth width)
{
	u32 best_burst;

	best_burst = min((u32)1 << __ffs(tlen | buf_len),
			 max_burst * width) / width;

	return (best_burst > 0) ? best_burst : 1;
}
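
/*
 * Illustrative example (not from the original source): with buf_len = 96,
 * tlen = 128, max_burst = 16 and a 4-byte width, __ffs(128 | 96) = 5 gives a
 * 32-byte natural boundary, so the helper returns min(32, 64) / 4 = 8 beats.
 */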

static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	u32 ccr, cisr, id, reg;
	int ret;

	id = chan->id;
	reg = STM32_MDMA_CCR(id);

	/* Disable interrupts */
	stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);

	ccr = stm32_mdma_read(dmadev, reg);
	if (ccr & STM32_MDMA_CCR_EN) {
		stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);

		/* Ensure that any ongoing transfer has been completed */
		ret = readl_relaxed_poll_timeout_atomic(
				dmadev->base + STM32_MDMA_CISR(id), cisr,
				(cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
		if (ret) {
			dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
			return -EBUSY;
		}
	}

	return 0;
}

static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	u32 status;
	int ret;

	/* Disable DMA */
	ret = stm32_mdma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
	}

	chan->busy = false;
}

static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
			       u32 ctbr_mask, u32 src_addr)
{
	u32 mask;
	int i;

	/* Check if memory device is on AHB or AXI */
	*ctbr &= ~ctbr_mask;
	mask = src_addr & 0xF0000000;
	for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
		if (mask == dmadev->ahb_addr_masks[i]) {
			*ctbr |= ctbr_mask;
			break;
		}
	}
}
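
/*
 * Illustrative example (not from the original source): if the device tree
 * lists 0x20000000 in st,ahb-addr-masks, a buffer at 0x20001000 masks to
 * 0x20000000 and matches, so the SBUS/DBUS bit is set and the access is
 * routed through the AHB bus instead of AXI.
 */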

static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
				     enum dma_transfer_direction direction,
				     u32 *mdma_ccr, u32 *mdma_ctcr,
				     u32 *mdma_ctbr, dma_addr_t addr,
				     u32 buf_len)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	phys_addr_t src_addr, dst_addr;
	int src_bus_width, dst_bus_width;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 ccr, ctcr, ctbr, tlen;

	src_addr_width = chan->dma_config.src_addr_width;
	dst_addr_width = chan->dma_config.dst_addr_width;
	src_maxburst = chan->dma_config.src_maxburst;
	dst_maxburst = chan->dma_config.dst_maxburst;

	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));

	/* Enable HW request mode */
	ctcr &= ~STM32_MDMA_CTCR_SWRM;
	/* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN retrieved from DT */
	ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
	ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;

	/*
	 * For buffer transfer length (TLEN) we have to set
	 * the number of bytes - 1 in CTCR register
	 */
	tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
	ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
	ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));

	/* Disable Pack Enable */
	ctcr &= ~STM32_MDMA_CTCR_PKE;

	/* Check burst size constraints */
	if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
	    dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
		dev_err(chan2dev(chan),
			"burst size * bus width higher than %d bytes\n",
			STM32_MDMA_MAX_BURST);
		return -EINVAL;
	}

	if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
	    (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
		dev_err(chan2dev(chan), "burst size must be a power of 2\n");
		return -EINVAL;
	}

	/*
	 * Configure channel control:
	 * - Clear SW request as in this case this is a HW one
	 * - Clear WEX, HEX and BEX bits
	 * - Set priority level
	 */
	ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
		 STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
	ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);

	/* Configure Trigger selection */
	ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
	ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dst_addr = chan->dma_config.dst_addr;

		/* Set device data size */
		dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;
		ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
		ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);

		/* Set device burst value */
		dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   dst_maxburst,
							   dst_addr_width);
		chan->mem_burst = dst_best_burst;
		ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));

		/* Set memory data size */
		src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;
		ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK |
			  STM32_MDMA_CTCR_SINCOS_MASK);
		ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
			STM32_MDMA_CTCR_SINCOS(src_bus_width);

		/* Set memory burst value */
		src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
		src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   src_maxburst,
							   src_addr_width);
		chan->mem_burst = src_best_burst;
		ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));

		/* Select bus */
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
				   dst_addr);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Set destination address */
		stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
		break;

	case DMA_DEV_TO_MEM:
		src_addr = chan->dma_config.src_addr;

		/* Set device data size */
		src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;
		ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
		ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);

		/* Set device burst value */
		src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   src_maxburst,
							   src_addr_width);
		ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));

		/* Set memory data size */
		dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;
		ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
			  STM32_MDMA_CTCR_DINCOS_MASK);
		ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
			STM32_MDMA_CTCR_DINCOS(dst_bus_width);

		/* Set memory burst value */
		dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
		dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   dst_maxburst,
							   dst_addr_width);
		ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));

		/* Select bus */
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
				   src_addr);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Set source address */
		stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	*mdma_ccr = ccr;
	*mdma_ctcr = ctcr;
	*mdma_ctbr = ctbr;

	return 0;
}

static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
				   struct stm32_mdma_desc_node *node)
{
	dev_dbg(chan2dev(chan), "hwdesc: %pad\n", &node->hwdesc_phys);
	dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", node->hwdesc->ctcr);
	dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", node->hwdesc->cbndtr);
	dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", node->hwdesc->csar);
	dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", node->hwdesc->cdar);
	dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", node->hwdesc->cbrur);
	dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", node->hwdesc->clar);
	dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", node->hwdesc->ctbr);
	dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", node->hwdesc->cmar);
	dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", node->hwdesc->cmdr);
}

static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
				    struct stm32_mdma_desc *desc,
				    enum dma_transfer_direction dir, u32 count,
				    dma_addr_t src_addr, dma_addr_t dst_addr,
				    u32 len, u32 ctcr, u32 ctbr, bool is_last,
				    bool is_first, bool is_cyclic)
{
	struct stm32_mdma_chan_config *config = &chan->chan_config;
	struct stm32_mdma_hwdesc *hwdesc;
	u32 next = count + 1;

	hwdesc = desc->node[count].hwdesc;
	hwdesc->ctcr = ctcr;
	hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
			    STM32_MDMA_CBNDTR_BRDUM |
			    STM32_MDMA_CBNDTR_BRSUM |
			    STM32_MDMA_CBNDTR_BNDT_MASK);
	hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
	hwdesc->csar = src_addr;
	hwdesc->cdar = dst_addr;
	hwdesc->cbrur = 0;
	hwdesc->ctbr = ctbr;
	hwdesc->cmar = config->mask_addr;
	hwdesc->cmdr = config->mask_data;

	if (is_last) {
		if (is_cyclic)
			hwdesc->clar = desc->node[0].hwdesc_phys;
		else
			hwdesc->clar = 0;
	} else {
		hwdesc->clar = desc->node[next].hwdesc_phys;
	}

	stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
}
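
/*
 * Note (added for clarity): the hardware walks the resulting linked list by
 * itself: each descriptor's CLAR holds the bus address of the next node, a
 * zero CLAR terminates the chain, and a cyclic transfer simply points the
 * last node back at node[0].
 */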

static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
				 struct stm32_mdma_desc *desc,
				 struct scatterlist *sgl, u32 sg_len,
				 enum dma_transfer_direction direction)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct dma_slave_config *dma_config = &chan->dma_config;
	struct scatterlist *sg;
	dma_addr_t src_addr, dst_addr;
	u32 ccr, ctcr, ctbr;
	int i, ret = 0;

	for_each_sg(sgl, sg, sg_len, i) {
		if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
			dev_err(chan2dev(chan), "Invalid block len\n");
			return -EINVAL;
		}

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = dma_config->dst_addr;
			ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
							&ctcr, &ctbr, src_addr,
							sg_dma_len(sg));
			stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
					   src_addr);
		} else {
			src_addr = dma_config->src_addr;
			dst_addr = sg_dma_address(sg);
			ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
							&ctcr, &ctbr, dst_addr,
							sg_dma_len(sg));
			stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
					   dst_addr);
		}

		if (ret < 0)
			return ret;

		stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
					dst_addr, sg_dma_len(sg), ctcr, ctbr,
					i == sg_len - 1, i == 0, false);
	}

	/* Enable interrupts */
	ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
	ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
	if (sg_len > 1)
		ccr |= STM32_MDMA_CCR_BTIE;
	desc->ccr = ccr;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) u32 sg_len, enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) struct stm32_mdma_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
	/*
	 * Once the DMA channel has been set up in cyclic mode, it cannot be
	 * assigned to another request. The channel must be aborted or
	 * terminated before a new request can be accepted.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (chan->desc && chan->desc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) dev_err(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) "Request not allowed when dma in cyclic mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) desc = stm32_mdma_alloc_desc(chan, sg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) goto xfer_setup_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) desc->cyclic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) xfer_setup_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) for (i = 0; i < desc->count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) desc->node[i].hwdesc_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct dma_slave_config *dma_config = &chan->dma_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct stm32_mdma_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dma_addr_t src_addr, dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) u32 ccr, ctcr, ctbr, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
	/*
	 * Once the DMA channel has been set up in cyclic mode, it cannot be
	 * assigned to another request. The channel must be aborted or
	 * terminated before a new request can be accepted.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (chan->desc && chan->desc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) dev_err(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) "Request not allowed when dma in cyclic mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) dev_err(chan2dev(chan), "Invalid buffer/period len\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (buf_len % period_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
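	/*
	 * One hardware descriptor per period; stm32_mdma_setup_hwdesc() is
	 * called below with its cyclic flag set, chaining the nodes into a
	 * ring so the transfer repeats until it is terminated.
	 */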
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) count = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) desc = stm32_mdma_alloc_desc(chan, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /* Select bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) src_addr = buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) &ctbr, src_addr, period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) src_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) dst_addr = buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) &ctbr, dst_addr, period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) goto xfer_setup_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) desc->ccr = ccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* Configure hwdesc list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) src_addr = buf_addr + i * period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dst_addr = dma_config->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) src_addr = dma_config->src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dst_addr = buf_addr + i * period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) dst_addr, period_len, ctcr, ctbr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) i == count - 1, i == 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) desc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) xfer_setup_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) for (i = 0; i < desc->count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) desc->node[i].hwdesc_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) enum dma_slave_buswidth max_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct stm32_mdma_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct stm32_mdma_hwdesc *hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) u32 best_burst, tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) size_t xfer_count, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) int src_bus_width, dst_bus_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
	/*
	 * Once the DMA channel has been set up in cyclic mode, it cannot be
	 * assigned to another request. The channel must be aborted or
	 * terminated before a new request can be accepted.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (chan->desc && chan->desc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) dev_err(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) "Request not allowed when dma in cyclic mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
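	/*
	 * One hardware descriptor per chunk of at most
	 * STM32_MDMA_MAX_BLOCK_LEN bytes.
	 */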
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) desc = stm32_mdma_alloc_desc(chan, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
	/* Enable the transfer error interrupt and clear the remaining bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) STM32_MDMA_CCR_IRQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ccr |= STM32_MDMA_CCR_TEIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /* Enable SW request mode, dest/src inc and clear other bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) STM32_MDMA_CTCR_SINC_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* Reset HW request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Select bus */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
	/* Clear the block repeat and block length fields of CBNDTR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
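	/*
	 * Pick the transfer model from the copy length: a plain buffer
	 * transfer for short copies, a single block transfer up to the
	 * maximum block length, and a linked list of blocks beyond that.
	 */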
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (len <= STM32_MDMA_MAX_BUF_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /* Setup a buffer transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* Setup a block transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
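		/* TLEN is programmed as the transfer length minus one */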
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) tlen = STM32_MDMA_MAX_BUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* Set source best burst size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) max_width = stm32_mdma_get_max_width(src, len, tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) src_bus_width = stm32_mdma_get_width(chan, max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
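		/*
		 * Burst lengths are log2-encoded in CTCR, hence the ilog2()
		 * on the best burst computed below.
		 */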
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) max_burst = tlen / max_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) mdma_burst = ilog2(best_burst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) STM32_MDMA_CTCR_SSIZE(src_bus_width) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) STM32_MDMA_CTCR_SINCOS(src_bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /* Set destination best burst size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) max_width = stm32_mdma_get_max_width(dest, len, tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) dst_bus_width = stm32_mdma_get_width(chan, max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) max_burst = tlen / max_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) mdma_burst = ilog2(best_burst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) STM32_MDMA_CTCR_DINCOS(dst_bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
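		/*
		 * With differing bus widths, enable packing/unpacking so the
		 * controller converts between source and destination widths.
		 */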
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (dst_bus_width != src_bus_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ctcr |= STM32_MDMA_CTCR_PKE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* Prepare hardware descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) hwdesc = desc->node[0].hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) hwdesc->ctcr = ctcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) hwdesc->cbndtr = cbndtr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) hwdesc->csar = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) hwdesc->cdar = dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) hwdesc->cbrur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) hwdesc->clar = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) hwdesc->ctbr = ctbr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) hwdesc->cmar = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) hwdesc->cmdr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /* Setup a LLI transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) tlen = STM32_MDMA_MAX_BUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
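		/*
		 * Split the copy into STM32_MDMA_MAX_BLOCK_LEN sized chunks,
		 * one linked-list node each.
		 */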
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) for (i = 0, offset = 0; offset < len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) i++, offset += xfer_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) xfer_count = min_t(size_t, len - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) STM32_MDMA_MAX_BLOCK_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /* Set source best burst size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) max_width = stm32_mdma_get_max_width(src, len, tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) src_bus_width = stm32_mdma_get_width(chan, max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) max_burst = tlen / max_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) best_burst = stm32_mdma_get_best_burst(len, tlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) max_burst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) mdma_burst = ilog2(best_burst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) STM32_MDMA_CTCR_SSIZE(src_bus_width) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) STM32_MDMA_CTCR_SINCOS(src_bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* Set destination best burst size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) max_width = stm32_mdma_get_max_width(dest, len, tlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) dst_bus_width = stm32_mdma_get_width(chan, max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) max_burst = tlen / max_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) best_burst = stm32_mdma_get_best_burst(len, tlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) max_burst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) max_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) mdma_burst = ilog2(best_burst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) STM32_MDMA_CTCR_DINCOS(dst_bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (dst_bus_width != src_bus_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) ctcr |= STM32_MDMA_CTCR_PKE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) /* Prepare hardware descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) src + offset, dest + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) xfer_count, ctcr, ctbr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) i == count - 1, i == 0, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) desc->ccr = ccr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) desc->cyclic = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dev_dbg(chan2dev(chan), "CCR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct virt_dma_desc *vdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct stm32_mdma_hwdesc *hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) u32 id = chan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) u32 status, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) vdesc = vchan_next_desc(&chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (!vdesc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) chan->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) list_del(&vdesc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) chan->desc = to_stm32_mdma_desc(vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) hwdesc = chan->desc->node[0].hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) chan->curr_hwdesc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
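	/*
	 * Program the channel from the first hardware descriptor; for
	 * linked-list transfers, CLAR holds the address of the next node so
	 * the controller walks the rest of the list by itself.
	 */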
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
	/* Clear any pending interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) stm32_mdma_dump_reg(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) /* Start DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /* Set SW request in case of MEM2MEM transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) reg = STM32_MDMA_CCR(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) chan->busy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static void stm32_mdma_issue_pending(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) spin_lock_irqsave(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (!vchan_issue_pending(&chan->vchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!chan->desc && !chan->busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) stm32_mdma_start_transfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) spin_unlock_irqrestore(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) static int stm32_mdma_pause(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) spin_lock_irqsave(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) ret = stm32_mdma_disable_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) spin_unlock_irqrestore(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static int stm32_mdma_resume(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct stm32_mdma_hwdesc *hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) u32 status, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
	/* Cannot resume if no transfer is ongoing */
	if (!chan->desc)
		return -EPERM;

	hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) spin_lock_irqsave(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* Re-configure control register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
	/* Clear any pending interrupt status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) stm32_mdma_dump_reg(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* Re-start DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) reg = STM32_MDMA_CCR(chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /* Set SW request in case of MEM2MEM transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) spin_unlock_irqrestore(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static int stm32_mdma_terminate_all(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) spin_lock_irqsave(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (chan->desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) vchan_terminate_vdesc(&chan->desc->vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (chan->busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) stm32_mdma_stop(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) chan->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) vchan_get_all_descriptors(&chan->vchan, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) spin_unlock_irqrestore(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) vchan_dma_desc_free_list(&chan->vchan, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void stm32_mdma_synchronize(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) vchan_synchronize(&chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) static int stm32_mdma_slave_config(struct dma_chan *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) struct dma_slave_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) memcpy(&chan->dma_config, config, sizeof(*config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct stm32_mdma_desc *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) u32 curr_hwdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) u32 cbndtr, residue, modulo, burst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
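	/*
	 * Residue is the sum of the block counts of the descriptors not yet
	 * executed plus what remains of the current block (CBNDTR.BNDT).
	 */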
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) for (i = curr_hwdesc + 1; i < desc->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) hwdesc = desc->node[i].hwdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (!chan->mem_burst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
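	/*
	 * Memory bursts complete as a whole, so round the residue up to the
	 * next burst boundary.
	 */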
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) burst_size = chan->mem_burst * chan->mem_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) modulo = residue % burst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (modulo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) residue = residue - modulo + burst_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct dma_tx_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) struct virt_dma_desc *vdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) u32 residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) status = dma_cookie_status(c, cookie, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if ((status == DMA_COMPLETE) || (!state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) spin_lock_irqsave(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) vdesc = vchan_find_desc(&chan->vchan, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) residue = stm32_mdma_desc_residue(chan, chan->desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) chan->curr_hwdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) else if (vdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) residue = stm32_mdma_desc_residue(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) to_stm32_mdma_desc(vdesc), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dma_set_residue(state, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) spin_unlock_irqrestore(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) vchan_cookie_complete(&chan->desc->vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) chan->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) chan->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
	/* Start the next transfer if the channel has a pending descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) stm32_mdma_start_transfer(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct stm32_mdma_device *dmadev = devid;
	struct stm32_mdma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) u32 reg, id, ien, status, flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /* Find out which channel generates the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) id = __ffs(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) dev_dbg(mdma2dev(dmadev), "spurious it\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) id = __ffs(status);
		/*
		 * GISR0 reports status for channels 0 to 31, while GISR1
		 * reports status for channels 32 to 62.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) id += 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
	if (id >= dmadev->nr_channels) {
		dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
		goto exit;
	}
	chan = &dmadev->chan[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /* Handle interrupt for the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) spin_lock(&chan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) ien &= STM32_MDMA_CCR_IRQ_MASK;
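	/*
	 * The enable bits in CCR sit one bit left of the corresponding
	 * status flags in CISR; shift down to compare them directly.
	 */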
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) ien >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (!(status & ien)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) spin_unlock(&chan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) "spurious it (status=0x%04x, ien=0x%04x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) status, ien);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) flag = __ffs(status & ien);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) reg = STM32_MDMA_CIFCR(chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) switch (1 << flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) case STM32_MDMA_CISR_TEIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) id = chan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) case STM32_MDMA_CISR_CTCIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) stm32_mdma_xfer_end(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) case STM32_MDMA_CISR_BRTIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) case STM32_MDMA_CISR_BTIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) chan->curr_hwdesc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (chan->desc && chan->desc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (chan->curr_hwdesc == chan->desc->count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) chan->curr_hwdesc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) vchan_cyclic_callback(&chan->desc->vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) case STM32_MDMA_CISR_TCIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 1 << flag, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) spin_unlock(&chan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) c->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) sizeof(struct stm32_mdma_hwdesc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) __alignof__(struct stm32_mdma_hwdesc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (!chan->desc_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ret = stm32_mdma_disable_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) pm_runtime_put(dmadev->ddev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) static void stm32_mdma_free_chan_resources(struct dma_chan *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (chan->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) spin_lock_irqsave(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) stm32_mdma_stop(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) chan->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) spin_unlock_irqrestore(&chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) pm_runtime_put(dmadev->ddev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) vchan_free_chan_resources(to_virt_chan(c));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) dmam_pool_destroy(chan->desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) chan->desc_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct stm32_mdma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct dma_chan *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct stm32_mdma_chan_config config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (dma_spec->args_count < 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) dev_err(mdma2dev(dmadev), "Bad number of args\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
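	/*
	 * The five DT cells carry the request line, the priority level, the
	 * transfer configuration and the HW request mask address/data.
	 */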
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) config.request = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) config.priority_level = dma_spec->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) config.transfer_config = dma_spec->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) config.mask_addr = dma_spec->args[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) config.mask_data = dma_spec->args[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (config.request >= dmadev->nr_requests) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) dev_err(mdma2dev(dmadev), "Bad request line\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) dev_err(mdma2dev(dmadev), "Priority level not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) c = dma_get_any_slave_channel(&dmadev->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (!c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) dev_err(mdma2dev(dmadev), "No more channels available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) chan = to_stm32_mdma_chan(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) chan->chan_config = config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static const struct of_device_id stm32_mdma_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) { .compatible = "st,stm32h7-mdma", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) { /* sentinel */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static int stm32_mdma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct stm32_mdma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct stm32_mdma_device *dmadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct dma_device *dd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct device_node *of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct reset_control *rst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) u32 nr_channels, nr_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) int i, count, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) of_node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (!of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) ret = device_property_read_u32(&pdev->dev, "dma-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) &nr_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) nr_channels = STM32_MDMA_MAX_CHANNELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) dev_warn(&pdev->dev, "MDMA defaulting to %i channels\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) nr_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) ret = device_property_read_u32(&pdev->dev, "dma-requests",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) &nr_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) nr_requests = STM32_MDMA_MAX_REQUESTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) dev_warn(&pdev->dev, "MDMA defaulting to %i request lines\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) nr_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
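/*
 * Both "dma-channels" and "dma-requests" are optional properties; when
 * absent, the driver falls back to the hardware maxima, as warned
 * above. Illustrative controller-node properties (values hypothetical):
 *
 *	dma-channels = <16>;
 *	dma-requests = <32>;
 */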
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) count = device_property_count_u32(&pdev->dev, "st,ahb-addr-masks");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) dmadev = devm_kzalloc(&pdev->dev, struct_size(dmadev, ahb_addr_masks, count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!dmadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) dmadev->nr_channels = nr_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) dmadev->nr_requests = nr_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) dmadev->ahb_addr_masks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) dmadev->nr_ahb_addr_masks = count;
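/*
 * The optional "st,ahb-addr-masks" property lists address masks of
 * memories reached over the AHB bus; its values land in the trailing
 * flexible array sized into the allocation above. Illustrative
 * property (mask values hypothetical):
 *
 *	st,ahb-addr-masks = <0x20000000>, <0x00000000>;
 */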
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) dmadev->base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (IS_ERR(dmadev->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return PTR_ERR(dmadev->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) dmadev->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (IS_ERR(dmadev->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) "Missing clock controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) ret = clk_prepare_enable(dmadev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
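/*
 * The reset line is optional: only -EPROBE_DEFER aborts the probe,
 * any other error simply skips the pulse. When a reset controller is
 * present, a short assert/deassert pulse brings the MDMA block into a
 * known state before registration.
 */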
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) rst = devm_reset_control_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (IS_ERR(rst)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) ret = PTR_ERR(rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (ret == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) reset_control_assert(rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) udelay(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) reset_control_deassert(rst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) dd = &dmadev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) dma_cap_set(DMA_SLAVE, dd->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) dma_cap_set(DMA_PRIVATE, dd->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) dma_cap_set(DMA_CYCLIC, dd->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) dma_cap_set(DMA_MEMCPY, dd->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) dd->device_tx_status = stm32_mdma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) dd->device_issue_pending = stm32_mdma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) dd->device_config = stm32_mdma_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) dd->device_pause = stm32_mdma_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) dd->device_resume = stm32_mdma_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) dd->device_terminate_all = stm32_mdma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) dd->device_synchronize = stm32_mdma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) dd->descriptor_reuse = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) BIT(DMA_MEM_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) dd->max_burst = STM32_MDMA_MAX_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) dd->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) INIT_LIST_HEAD(&dd->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
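/* Hook one virtual channel per hardware channel onto the DMA device */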
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) for (i = 0; i < dmadev->nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) chan = &dmadev->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) chan->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) chan->vchan.desc_free = stm32_mdma_desc_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) vchan_init(&chan->vchan, dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
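/*
 * Sketch of how a dmaengine client consumes the callbacks and
 * capabilities wired up above (hypothetical consumer code, not part of
 * this driver; "rx" and fifo_phys are made-up names):
 *
 *	struct dma_chan *ch = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 1,
 *	};
 *
 *	if (!IS_ERR(ch) && !dmaengine_slave_config(ch, &cfg)) {
 *		// dmaengine_prep_slave_sg(..., DMA_DEV_TO_MEM, ...), then
 *		// dmaengine_submit() and dma_async_issue_pending()
 *	}
 */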
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) dmadev->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (dmadev->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) ret = dmadev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 0, dev_name(&pdev->dev), dmadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) dev_err(&pdev->dev, "failed to request IRQ\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) ret = dmaenginem_async_device_register(dd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) "STM32 MDMA DMA OF registration failed %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) goto err_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) platform_set_drvdata(pdev, dmadev);
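/*
 * The clock is still on from probe, so tell the PM core the device is
 * active; pm_runtime_get_noresume() then takes a reference without a
 * redundant resume, and pm_runtime_put() drops it so the controller
 * can be runtime-suspended (clock gated) once it goes idle.
 */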
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) pm_runtime_get_noresume(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) dev_info(&pdev->dev, "STM32 MDMA driver registered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) err_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) clk_disable_unprepare(dmadev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) static int stm32_mdma_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) clk_disable_unprepare(dmadev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) static int stm32_mdma_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) ret = clk_prepare_enable(dmadev->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) dev_err(dev, "failed to prepare_enable clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) #endif
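/*
 * Runtime PM only gates the MDMA kernel clock: suspend disables it,
 * resume re-enables it before any register access.
 */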
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) static int stm32_mdma_pm_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) u32 ccr, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) ret = pm_runtime_resume_and_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
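/*
 * Refuse system suspend while any channel is still enabled; the
 * transient runtime-PM reference keeps the clock running so the CCR
 * registers can be read safely.
 */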
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) for (id = 0; id < dmadev->nr_channels; id++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (ccr & STM32_MDMA_CCR_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
/* Balance pm_runtime_resume_and_get() before bailing out */
pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static int stm32_mdma_pm_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) return pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) static const struct dev_pm_ops stm32_mdma_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) SET_SYSTEM_SLEEP_PM_OPS(stm32_mdma_pm_suspend, stm32_mdma_pm_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) stm32_mdma_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) static struct platform_driver stm32_mdma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) .probe = stm32_mdma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) .name = "stm32-mdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) .of_match_table = stm32_mdma_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) .pm = &stm32_mdma_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
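/*
 * Register at subsys_initcall time rather than module_init so this DMA
 * provider is up before the client drivers that depend on it.
 */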
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) static int __init stm32_mdma_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return platform_driver_register(&stm32_mdma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) subsys_initcall(stm32_mdma_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) MODULE_LICENSE("GPL v2");