^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // Copyright 2014-2015 Freescale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) // Copyright 2018 NXP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Driver for NXP Layerscape Queue Direct Memory Access Controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Author:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Wen He <wen.he_1@nxp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Jiaheng Fan <jiaheng.fan@nxp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/of_platform.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/of_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "virt-dma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "fsldma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) /* Register related definition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define FSL_QDMA_DMR 0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define FSL_QDMA_DSR 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define FSL_QDMA_DEIER 0xe00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define FSL_QDMA_DEDR 0xe04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define FSL_QDMA_DECFDW0R 0xe10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define FSL_QDMA_DECFDW1R 0xe14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define FSL_QDMA_DECFDW2R 0xe18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define FSL_QDMA_DECFDW3R 0xe1c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define FSL_QDMA_DECFQIDR 0xe30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define FSL_QDMA_DECBR 0xe34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define FSL_QDMA_SQDPAR 0x80c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define FSL_QDMA_SQEPAR 0x814
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #define FSL_QDMA_BSQMR 0x800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define FSL_QDMA_BSQSR 0x804
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define FSL_QDMA_BSQICR 0x828
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #define FSL_QDMA_CQMR 0xa00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #define FSL_QDMA_CQDSCR1 0xa08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #define FSL_QDMA_CQDSCR2 0xa0c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define FSL_QDMA_CQIER 0xa10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #define FSL_QDMA_CQEDR 0xa14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) #define FSL_QDMA_SQCCMR 0xa20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /* Registers for bit and genmask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #define FSL_QDMA_CQIDR_SQT BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) #define QDMA_CCDF_FORMAT BIT(29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #define QDMA_CCDF_SER BIT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define QDMA_SG_FIN BIT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #define QDMA_SG_LEN_MASK GENMASK(29, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) #define QDMA_CCDF_MASK GENMASK(28, 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) #define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) #define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) #define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) #define FSL_QDMA_BCQIER_CQTIE BIT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) #define FSL_QDMA_BCQIER_CQPEIE BIT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) #define FSL_QDMA_BSQICR_ICEN BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define FSL_QDMA_CQIER_MEIE BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) #define FSL_QDMA_CQIER_TEIE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) #define FSL_QDMA_SQCCMR_ENTER_WM BIT(21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) #define FSL_QDMA_BCQMR_EN BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #define FSL_QDMA_BCQMR_EI BIT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #define FSL_QDMA_BCQSR_QF BIT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #define FSL_QDMA_BCQSR_XOFF BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #define FSL_QDMA_BSQMR_EN BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #define FSL_QDMA_BSQMR_DI BIT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define FSL_QDMA_BSQSR_QE BIT(17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) #define FSL_QDMA_DMR_DQD BIT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) #define FSL_QDMA_DSR_DB BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /* Size related definition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) #define FSL_QDMA_QUEUE_MAX 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #define FSL_QDMA_COMMAND_BUFFER_SIZE 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define FSL_QDMA_QUEUE_NUM_MAX 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) /* Field definition for CMD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define FSL_QDMA_CMD_RWTTYPE 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #define FSL_QDMA_CMD_LWC 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #define FSL_QDMA_CMD_NS_OFFSET 27
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define FSL_QDMA_CMD_DQOS_OFFSET 24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define FSL_QDMA_CMD_WTHROTL_OFFSET 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define FSL_QDMA_CMD_DSEN_OFFSET 19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) #define FSL_QDMA_CMD_LWC_OFFSET 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /* Field definition for Descriptor status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) #define QDMA_CCDF_STATUS_RTE BIT(5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #define QDMA_CCDF_STATUS_WTE BIT(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define QDMA_CCDF_STATUS_CDE BIT(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) #define QDMA_CCDF_STATUS_SDE BIT(1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) #define QDMA_CCDF_STATUS_DDE BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) #define QDMA_CCDF_STATUS_MASK (QDMA_CCDF_STATUS_RTE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) QDMA_CCDF_STATUS_WTE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) QDMA_CCDF_STATUS_CDE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) QDMA_CCDF_STATUS_SDE | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) QDMA_CCDF_STATUS_DDE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /* Field definition for Descriptor offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) #define QDMA_CCDF_OFFSET 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) #define QDMA_SDDF_CMD(x) (((u64)(x)) << 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
/* Field definition for safe loop count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) #define FSL_QDMA_HALT_COUNT 1500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #define FSL_QDMA_MAX_SIZE 16385
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #define FSL_QDMA_COMP_TIMEOUT 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define FSL_COMMAND_QUEUE_OVERFLLOW 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) #define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) (((fsl_qdma_engine)->block_offset) * (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
/**
 * struct fsl_qdma_format - This is the struct describing the compound
 *			    descriptor format with qDMA.
 * @status:	Command status and enqueue status notification.
 * @cfg:	Frame offset and frame format.
 * @addr_lo:	Holding the compound descriptor of the lower
 *		32-bits address in memory 40-bit address.
 * @addr_hi:	Same as above member, but point high 8-bits in
 *		memory 40-bit address.
 * @__reserved1: Reserved field.
 * @cfg8b_w1:	Compound descriptor command queue origin produced
 *		by qDMA and dynamic debug field.
 * @data:	Pointer to the memory 40-bit address, describes DMA
 *		source information and DMA destination information.
 *
 * The same 16-byte layout is reused for the frame descriptor, the
 * compound S/G entries and the source/destination descriptors (see
 * fsl_qdma_comp_fill_memcpy()), hence the union between the split
 * lo/hi address view and the raw 64-bit @data word.
 */
struct fsl_qdma_format {
	__le32 status;
	__le32 cfg;
	union {
		struct {
			__le32 addr_lo;
			u8 addr_hi;
			u8 __reserved1[2];
			u8 cfg8b_w1;
		} __packed;
		__le64 data;
	};
} __packed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
/* qDMA status notification pre information */
struct fsl_pre_status {
	u64 addr;	/* address carried by a status notification */
	u8 queue;	/* command queue index it refers to */
};

/*
 * Per-CPU copy of the last status-notification values.
 * NOTE(review): the readers/writers of this are outside this chunk —
 * confirm intended cross-IRQ usage before relying on these semantics.
 */
static DEFINE_PER_CPU(struct fsl_pre_status, pre);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
/* One DMA channel: a virt-dma channel bound to a hardware command queue. */
struct fsl_qdma_chan {
	struct virt_dma_chan vchan;	/* virt-dma core channel state */
	struct virt_dma_desc vdesc;
	enum dma_status status;		/* last reported channel status */
	struct fsl_qdma_engine *qdma;	/* owning engine */
	struct fsl_qdma_queue *queue;	/* command queue used for enqueue */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
/* A circular command (or status) queue plus its descriptor bookkeeping. */
struct fsl_qdma_queue {
	/* NOTE(review): head/tail manipulation is outside this chunk —
	 * presumed producer/consumer cursors into @cq; confirm. */
	struct fsl_qdma_format *virt_head;
	struct fsl_qdma_format *virt_tail;
	struct list_head comp_used;	/* comp descriptors in flight */
	struct list_head comp_free;	/* pre-allocated, ready for reuse */
	struct dma_pool *comp_pool;	/* pool for command buffers */
	struct dma_pool *desc_pool;	/* pool for descriptor buffers */
	spinlock_t queue_lock;		/* protects the lists above */
	dma_addr_t bus_addr;		/* DMA address of @cq */
	u32 n_cq;			/* number of entries in @cq */
	u32 id;				/* queue index within its block */
	struct fsl_qdma_format *cq;	/* the circular queue itself */
	void __iomem *block_base;	/* registers of the owning block */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
/*
 * One pre-allocated compound descriptor: a command buffer (frame
 * descriptor + S/G table) and a descriptor buffer (source/destination
 * command words), each carved from its queue's dma_pool.
 */
struct fsl_qdma_comp {
	dma_addr_t bus_addr;		/* DMA address of @virt_addr */
	dma_addr_t desc_bus_addr;	/* DMA address of @desc_virt_addr */
	struct fsl_qdma_format *virt_addr;	/* command buffer (CPU view) */
	struct fsl_qdma_format *desc_virt_addr;	/* descriptor buffer (CPU view) */
	struct fsl_qdma_chan *qchan;	/* channel this comp is issued on */
	struct virt_dma_desc vdesc;	/* virt-dma descriptor handle */
	struct list_head list;		/* link on comp_used/comp_free */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
/* Driver-private state for one qDMA controller instance. */
struct fsl_qdma_engine {
	struct dma_device dma_dev;	/* dmaengine registration */
	void __iomem *ctrl_base;	/* control registers (DMR/DSR/...) */
	void __iomem *status_base;	/* status queue registers */
	void __iomem *block_base;	/* first queue block's registers */
	u32 n_chans;			/* number of DMA channels */
	u32 n_queues;			/* command queues per block */
	struct mutex fsl_qdma_mutex;
	int error_irq;			/* controller error interrupt */
	int *queue_irq;			/* per-block completion interrupts */
	u32 feature;			/* feature flags (set at probe) */
	struct fsl_qdma_queue *queue;	/* array of command queues */
	struct fsl_qdma_queue **status;	/* per-block status queues */
	struct fsl_qdma_chan *chans;	/* array of channels */
	int block_number;		/* number of queue blocks */
	int block_offset;		/* register stride between blocks */
	int irq_base;
	int desc_allocated;		/* channels with live desc pools */

};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) static inline u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) ccdf->addr_hi = upper_32_bits(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) static inline u8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) return ccdf->cfg8b_w1 & U8_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) (offset << QDMA_CCDF_OFFSET));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) return (le32_to_cpu(ccdf->status) & QDMA_CCDF_STATUS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
/* 32-bit register read; FSL_DMA_IN (fsldma.h) handles device endianness. */
static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
	return FSL_DMA_IN(qdma, addr, 32);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
/* 32-bit register write; FSL_DMA_OUT (fsldma.h) handles device endianness. */
static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
			void __iomem *addr)
{
	FSL_DMA_OUT(qdma, addr, val, 32);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
/* Map a generic dma_chan back to its enclosing fsl_qdma_chan. */
static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
/* Map a virt-dma descriptor back to its enclosing fsl_qdma_comp. */
static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_qdma_comp, vdesc);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) struct fsl_qdma_comp *comp_temp, *_comp_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) vchan_get_all_descriptors(&fsl_chan->vchan, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) list_for_each_entry_safe(comp_temp, _comp_temp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) &fsl_queue->comp_used, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) dma_pool_free(fsl_queue->comp_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) comp_temp->virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) comp_temp->bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) dma_pool_free(fsl_queue->desc_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) comp_temp->desc_virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) comp_temp->desc_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) list_del(&comp_temp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) kfree(comp_temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) list_for_each_entry_safe(comp_temp, _comp_temp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) &fsl_queue->comp_free, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) dma_pool_free(fsl_queue->comp_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) comp_temp->virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) comp_temp->bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) dma_pool_free(fsl_queue->desc_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) comp_temp->desc_virt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) comp_temp->desc_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) list_del(&comp_temp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) kfree(comp_temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) dma_pool_destroy(fsl_queue->comp_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) dma_pool_destroy(fsl_queue->desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) fsl_qdma->desc_allocated--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) fsl_queue->comp_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) fsl_queue->desc_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
/*
 * Populate @fsl_comp's command and descriptor buffers for a plain
 * memcpy of @len bytes from @src to @dst.
 *
 * The command buffer holds four 16-byte fsl_qdma_format entries:
 *   [0] frame descriptor, [1] S/G entry pointing at the descriptor
 *   buffer, [2] S/G entry for the source, [3] S/G entry for the
 *   destination.  The descriptor buffer holds the source and
 *   destination command words (sdf/ddf).
 */
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
				      dma_addr_t dst, dma_addr_t src, u32 len)
{
	u32 cmd;
	struct fsl_qdma_format *sdf, *ddf;
	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

	ccdf = fsl_comp->virt_addr;
	csgf_desc = fsl_comp->virt_addr + 1;
	csgf_src = fsl_comp->virt_addr + 2;
	csgf_dest = fsl_comp->virt_addr + 3;
	sdf = fsl_comp->desc_virt_addr;
	ddf = fsl_comp->desc_virt_addr + 1;

	memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
	memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
	/* Head Command Descriptor(Frame Descriptor) */
	/* Frame descriptor points at entry [1], 16 bytes into the buffer. */
	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
	/* Status notification is enqueued to status queue. */
	/* Compound Command Descriptor(Frame List Table) */
	qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
	/* It must be 32 as Compound S/G Descriptor */
	qdma_csgf_set_len(csgf_desc, 32);
	qdma_desc_addr_set64(csgf_src, src);
	qdma_csgf_set_len(csgf_src, len);
	qdma_desc_addr_set64(csgf_dest, dst);
	qdma_csgf_set_len(csgf_dest, len);
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
	/* Descriptor Buffer */
	/*
	 * NOTE(review): cmd holds a cpu_to_le32() result in a plain u32 and
	 * is then shifted into the 64-bit data word without a further
	 * conversion — correct on little-endian hosts, but questionable on
	 * big-endian; confirm against the hardware descriptor spec.
	 */
	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
	sdf->data = QDMA_SDDF_CMD(cmd);

	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
	/* Destination adds "last write" (LWC) to trigger completion. */
	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
	ddf->data = QDMA_SDDF_CMD(cmd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
/*
 * Pre-request full command descriptor for enqueue.
 *
 * Pre-allocates n_cq + FSL_COMMAND_QUEUE_OVERFLLOW compound descriptors
 * (tracking struct + command buffer + descriptor buffer each) onto
 * @queue->comp_free.  Returns 0 on success or -ENOMEM; on failure every
 * partially- and fully-built descriptor is released again.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
	int i;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;

	for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) {
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
		if (!comp_temp)
			goto err_alloc;
		comp_temp->virt_addr =
			dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
				       &comp_temp->bus_addr);
		if (!comp_temp->virt_addr)
			goto err_dma_alloc;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_desc_dma_alloc;

		/* Fully built: hand it to the free list for later use. */
		list_add_tail(&comp_temp->list, &queue->comp_free);
	}

	return 0;

	/* Unwind the current iteration's partial allocations first... */
err_desc_dma_alloc:
	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
		      comp_temp->bus_addr);

err_dma_alloc:
	kfree(comp_temp);

	/* ...then release every descriptor from earlier iterations. */
err_alloc:
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &queue->comp_free, list) {
		if (comp_temp->virt_addr)
			dma_pool_free(queue->comp_pool,
				      comp_temp->virt_addr,
				      comp_temp->bus_addr);
		if (comp_temp->desc_virt_addr)
			dma_pool_free(queue->desc_pool,
				      comp_temp->desc_virt_addr,
				      comp_temp->desc_bus_addr);

		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	return -ENOMEM;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * Request a command descriptor for enqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) static struct fsl_qdma_comp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) *fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) struct fsl_qdma_comp *comp_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) int timeout = FSL_QDMA_COMP_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) struct fsl_qdma_queue *queue = fsl_chan->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) while (timeout--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) spin_lock_irqsave(&queue->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (!list_empty(&queue->comp_free)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) comp_temp = list_first_entry(&queue->comp_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) struct fsl_qdma_comp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) list_del(&comp_temp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) spin_unlock_irqrestore(&queue->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) comp_temp->qchan = fsl_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return comp_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) spin_unlock_irqrestore(&queue->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) static struct fsl_qdma_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) *fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) struct fsl_qdma_engine *fsl_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) int ret, len, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) int queue_num, block_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) struct fsl_qdma_queue *queue_head, *queue_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) queue_num = fsl_qdma->n_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) block_number = fsl_qdma->block_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (queue_num > FSL_QDMA_QUEUE_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) queue_num = FSL_QDMA_QUEUE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) len = sizeof(*queue_head) * queue_num * block_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (!queue_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) queue_size, queue_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) dev_err(&pdev->dev, "Can't get queue-sizes.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) for (j = 0; j < block_number; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) for (i = 0; i < queue_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) "Get wrong queue-sizes.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) queue_temp = queue_head + i + (j * queue_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) queue_temp->cq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) sizeof(struct fsl_qdma_format) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) queue_size[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) &queue_temp->bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (!queue_temp->cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) queue_temp->block_base = fsl_qdma->block_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) queue_temp->n_cq = queue_size[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) queue_temp->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) queue_temp->virt_head = queue_temp->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) queue_temp->virt_tail = queue_temp->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * List for queue command buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) INIT_LIST_HEAD(&queue_temp->comp_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) spin_lock_init(&queue_temp->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) return queue_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) static struct fsl_qdma_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) *fsl_qdma_prep_status_queue(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) unsigned int status_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) struct fsl_qdma_queue *status_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) ret = of_property_read_u32(np, "status-sizes", &status_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) dev_err(&pdev->dev, "Can't get status-sizes.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) dev_err(&pdev->dev, "Get wrong status_size.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) status_head = devm_kzalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) sizeof(*status_head), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) if (!status_head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) * Buffer for queue command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) status_head->cq = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) sizeof(struct fsl_qdma_format) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) status_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) &status_head->bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) if (!status_head->cq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) devm_kfree(&pdev->dev, status_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) status_head->n_cq = status_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) status_head->virt_head = status_head->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) status_head->virt_tail = status_head->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) status_head->comp_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) return status_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) int i, j, count = FSL_QDMA_HALT_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) /* Disable the command queue and wait for idle state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) reg |= FSL_QDMA_DMR_DQD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) for (j = 0; j < fsl_qdma->block_number; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) block = fsl_qdma->block_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (!(reg & FSL_QDMA_DSR_DB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (count-- < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) for (j = 0; j < fsl_qdma->block_number; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) block = fsl_qdma->block_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) /* Disable status queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * clear the command queue interrupt detect register for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * all queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) block + FSL_QDMA_BCQIDR(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) void *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) bool duplicate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) u32 reg, i, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) u8 completion_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) struct fsl_qdma_queue *temp_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) struct fsl_qdma_format *status_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct fsl_qdma_comp *fsl_comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) count = FSL_QDMA_MAX_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) while (count--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) duplicate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if (reg & FSL_QDMA_BSQSR_QE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) status_addr = fsl_status->virt_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (qdma_ccdf_get_queue(status_addr) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) __this_cpu_read(pre.queue) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) qdma_ccdf_addr_get64(status_addr) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) __this_cpu_read(pre.addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) duplicate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) i = qdma_ccdf_get_queue(status_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) id * fsl_qdma->n_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) temp_queue = fsl_queue + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) spin_lock(&temp_queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) if (list_empty(&temp_queue->comp_used)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (!duplicate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) spin_unlock(&temp_queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) fsl_comp = list_first_entry(&temp_queue->comp_used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) struct fsl_qdma_comp, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) if (fsl_comp->bus_addr + 16 !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) __this_cpu_read(pre.addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (!duplicate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) spin_unlock(&temp_queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) if (duplicate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) reg |= FSL_QDMA_BSQMR_DI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) qdma_desc_addr_set64(status_addr, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) fsl_status->virt_head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (fsl_status->virt_head == fsl_status->cq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) + fsl_status->n_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) fsl_status->virt_head = fsl_status->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) spin_unlock(&temp_queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) list_del(&fsl_comp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) completion_status = qdma_ccdf_get_status(status_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) reg |= FSL_QDMA_BSQMR_DI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) qdma_desc_addr_set64(status_addr, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) fsl_status->virt_head++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) fsl_status->virt_head = fsl_status->cq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) spin_unlock(&temp_queue->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) /* The completion_status is evaluated here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * (outside of spin lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (completion_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* A completion error occurred! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (completion_status & QDMA_CCDF_STATUS_WTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /* Write transaction error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) fsl_comp->vdesc.tx_result.result =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) DMA_TRANS_WRITE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) } else if (completion_status & QDMA_CCDF_STATUS_RTE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /* Read transaction error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) fsl_comp->vdesc.tx_result.result =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) DMA_TRANS_READ_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) /* Command/source/destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * description error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) fsl_comp->vdesc.tx_result.result =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) DMA_TRANS_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) dev_err(fsl_qdma->dma_dev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) "DMA status descriptor error %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) completion_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) spin_lock(&fsl_comp->qchan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) vchan_cookie_complete(&fsl_comp->vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) fsl_comp->qchan->status = DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) spin_unlock(&fsl_comp->qchan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) unsigned int intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) struct fsl_qdma_engine *fsl_qdma = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) void __iomem *status = fsl_qdma->status_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) unsigned int decfdw0r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) unsigned int decfdw1r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) unsigned int decfdw2r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) unsigned int decfdw3r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (intr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) dev_err(fsl_qdma->dma_dev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) "DMA transaction error! (%x: %x-%x-%x-%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) unsigned int intr, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct fsl_qdma_engine *fsl_qdma = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) id = irq - fsl_qdma->irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (id < 0 && id > fsl_qdma->block_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) dev_err(fsl_qdma->dma_dev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) "irq %d is wrong irq_base is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) irq, fsl_qdma->irq_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) block = fsl_qdma->block_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (intr != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) reg |= FSL_QDMA_DMR_DQD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /* Clear all detected events and interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) block + FSL_QDMA_BCQIDR(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) fsl_qdma_irq_init(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct fsl_qdma_engine *fsl_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) char irq_name[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) fsl_qdma->error_irq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) platform_get_irq_byname(pdev, "qdma-error");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (fsl_qdma->error_irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return fsl_qdma->error_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) fsl_qdma_error_handler, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) "qDMA error", fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) for (i = 0; i < fsl_qdma->block_number; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) sprintf(irq_name, "qdma-queue%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) fsl_qdma->queue_irq[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) platform_get_irq_byname(pdev, irq_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (fsl_qdma->queue_irq[i] < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return fsl_qdma->queue_irq[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ret = devm_request_irq(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) fsl_qdma->queue_irq[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) fsl_qdma_queue_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) "qDMA queue",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) "Can't register qDMA queue IRQ.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) cpu = i % num_online_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) get_cpu_mask(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) "Can't set cpu %d affinity to IRQ %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) fsl_qdma->queue_irq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static void fsl_qdma_irq_exit(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct fsl_qdma_engine *fsl_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) for (i = 0; i < fsl_qdma->block_number; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) int i, j, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct fsl_qdma_queue *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) void __iomem *status = fsl_qdma->status_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /* Try to halt the qDMA engine first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ret = fsl_qdma_halt(fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) for (i = 0; i < fsl_qdma->block_number; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * Clear the command queue interrupt detect register for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * all queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) block = fsl_qdma->block_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) block + FSL_QDMA_BCQIDR(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) for (j = 0; j < fsl_qdma->block_number; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) block = fsl_qdma->block_base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) for (i = 0; i < fsl_qdma->n_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) temp = fsl_queue + i + (j * fsl_qdma->n_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Initialize Command Queue registers to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * point to the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * command descriptor in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * Dequeue Pointer Address Registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * Enqueue Pointer Address Registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) qdma_writel(fsl_qdma, temp->bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) block + FSL_QDMA_BCQDPA_SADDR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) qdma_writel(fsl_qdma, temp->bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) block + FSL_QDMA_BCQEPA_SADDR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* Initialize the queue mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) reg = FSL_QDMA_BCQMR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * Workaround for erratum: ERR010812.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * We must enable XOFF to avoid the enqueue rejection occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * Setting SQCCMR ENTER_WM to 0x20.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) block + FSL_QDMA_SQCCMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Initialize status queue registers to point to the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * command descriptor in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * Dequeue Pointer Address Registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Enqueue Pointer Address Registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) block + FSL_QDMA_SQEPAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) block + FSL_QDMA_SQDPAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* Initialize status queue interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) block + FSL_QDMA_BCQIER(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) FSL_QDMA_BSQICR_ICST(5) | 0x8000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) block + FSL_QDMA_BSQICR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) FSL_QDMA_CQIER_TEIE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) block + FSL_QDMA_CQIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Initialize the status queue mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) reg = FSL_QDMA_BSQMR_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) (fsl_qdma->status[j]->n_cq) - 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) /* Initialize controller interrupt register. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) reg &= ~FSL_QDMA_DMR_DQD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) dma_addr_t src, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct fsl_qdma_comp *fsl_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!fsl_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
/*
 * Push the next pending virt-dma descriptor into the channel's hardware
 * command queue and kick the engine.
 *
 * Caller context: called from fsl_qdma_issue_pending() with both the
 * queue lock and the vchan lock held.
 */
static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	u32 reg;
	struct virt_dma_desc *vdesc;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void __iomem *block = fsl_queue->block_base;

	/* Bail out if the command queue is full or flow-controlled (XOFF). */
	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
		return;
	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	list_del(&vdesc->node);
	fsl_comp = to_fsl_qdma_comp(vdesc);

	/* Copy the pre-built command descriptor into the enqueue slot and
	 * advance the head, wrapping at the end of the circular queue.
	 */
	memcpy(fsl_queue->virt_head++,
	       fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
		fsl_queue->virt_head = fsl_queue->cq;

	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
	/* Ensure the descriptor copy is complete before ringing the
	 * enqueue doorbell below.
	 */
	barrier();
	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
	reg |= FSL_QDMA_BCQMR_EI;
	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
	fsl_chan->status = DMA_IN_PROGRESS;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct fsl_qdma_comp *fsl_comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct fsl_qdma_queue *fsl_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) fsl_comp = to_fsl_qdma_comp(vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) fsl_queue = fsl_comp->qchan->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) spin_lock_irqsave(&fsl_queue->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) static void fsl_qdma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) spin_lock_irqsave(&fsl_queue->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) spin_lock(&fsl_chan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (vchan_issue_pending(&fsl_chan->vchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) fsl_qdma_enqueue_desc(fsl_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) spin_unlock(&fsl_chan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) static void fsl_qdma_synchronize(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) vchan_synchronize(&fsl_chan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static int fsl_qdma_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) vchan_get_all_descriptors(&fsl_chan->vchan, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (fsl_queue->comp_pool && fsl_queue->desc_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return fsl_qdma->desc_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) INIT_LIST_HEAD(&fsl_queue->comp_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * The dma pool for queue command buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) fsl_queue->comp_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) dma_pool_create("comp_pool",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) FSL_QDMA_COMMAND_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 64, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (!fsl_queue->comp_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * The dma pool for Descriptor(SD/DD) buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) fsl_queue->desc_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) dma_pool_create("desc_pool",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 32, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (!fsl_queue->desc_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) goto err_desc_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dev_err(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) "failed to alloc dma buffer for S/G descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) goto err_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) fsl_qdma->desc_allocated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return fsl_qdma->desc_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) err_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dma_pool_destroy(fsl_queue->desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) err_desc_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) dma_pool_destroy(fsl_queue->comp_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) static int fsl_qdma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) int blk_num, blk_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) u32 len, chans, queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct fsl_qdma_chan *fsl_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) struct fsl_qdma_engine *fsl_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct device_node *np = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ret = of_property_read_u32(np, "dma-channels", &chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) dev_err(&pdev->dev, "Can't get dma-channels.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) ret = of_property_read_u32(np, "block-offset", &blk_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dev_err(&pdev->dev, "Can't get block-offset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) ret = of_property_read_u32(np, "block-number", &blk_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) dev_err(&pdev->dev, "Can't get block-number.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) blk_num = min_t(int, blk_num, num_online_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) len = sizeof(*fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!fsl_qdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) len = sizeof(*fsl_chan) * chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!fsl_qdma->chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) len = sizeof(struct fsl_qdma_queue *) * blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (!fsl_qdma->status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) len = sizeof(int) * blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) if (!fsl_qdma->queue_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) dev_err(&pdev->dev, "Can't get queues.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) fsl_qdma->desc_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) fsl_qdma->n_chans = chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) fsl_qdma->n_queues = queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) fsl_qdma->block_number = blk_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) fsl_qdma->block_offset = blk_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) mutex_init(&fsl_qdma->fsl_qdma_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) for (i = 0; i < fsl_qdma->block_number; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (!fsl_qdma->status[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (IS_ERR(fsl_qdma->ctrl_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return PTR_ERR(fsl_qdma->ctrl_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (IS_ERR(fsl_qdma->status_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return PTR_ERR(fsl_qdma->status_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (IS_ERR(fsl_qdma->block_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return PTR_ERR(fsl_qdma->block_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (!fsl_qdma->queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) ret = fsl_qdma_irq_init(pdev, fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (fsl_qdma->irq_base < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) return fsl_qdma->irq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) fsl_qdma->feature = of_property_read_bool(np, "big-endian");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) for (i = 0; i < fsl_qdma->n_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) fsl_chan->qdma = fsl_qdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) fsl_qdma->block_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) fsl_qdma->dma_dev.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) fsl_qdma->dma_dev.device_free_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) fsl_qdma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) fsl_qdma->dma_dev.device_alloc_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) fsl_qdma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) dev_err(&pdev->dev, "dma_set_mask failure.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) platform_set_drvdata(pdev, fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ret = dma_async_device_register(&fsl_qdma->dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) "Can't register NXP Layerscape qDMA engine.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ret = fsl_qdma_reg_init(fsl_qdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct fsl_qdma_chan *chan, *_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) list_for_each_entry_safe(chan, _chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) &dmadev->channels, vchan.chan.device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) list_del(&chan->vchan.chan.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) tasklet_kill(&chan->vchan.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
/*
 * Remove: tear down IRQs and channels, unregister from the dmaengine
 * core, and free the per-block status-queue coherent buffers.
 *
 * NOTE(review): only the status queues are freed here; the command-queue
 * coherent buffers (fsl_qdma->queue[*].cq) are not.  That is a leak
 * unless fsl_qdma_alloc_queue_resources() uses a devm/dmam-managed
 * allocation — TODO confirm against that function.
 */
static int fsl_qdma_remove(struct platform_device *pdev)
{
	int i;
	struct fsl_qdma_queue *status;
	struct device_node *np = pdev->dev.of_node;
	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

	fsl_qdma_irq_exit(pdev, fsl_qdma);
	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_qdma->dma_dev);

	/* Free one status-queue ring per block. */
	for (i = 0; i < fsl_qdma->block_number; i++) {
		status = fsl_qdma->status[i];
		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
				status->n_cq, status->cq, status->bus_addr);
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id fsl_qdma_dt_ids[] = {
	{ .compatible = "fsl,ls1021a-qdma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
/* Platform driver glue: bind via the OF match table above. */
static struct platform_driver fsl_qdma_driver = {
	.driver		= {
		.name	= "fsl-qdma",
		.of_match_table = fsl_qdma_dt_ids,
	},
	.probe          = fsl_qdma_probe,
	.remove		= fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");