/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Driver for the Synopsys DesignWare AHB DMA Controller
 *
 * Copyright (C) 2005-2007 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2016 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>

#include <linux/io-64-nonatomic-hi-lo.h>

#include "internal.h"

#define DW_DMA_MAX_NR_REQUESTS	16

/* flow controller */
enum dw_dma_fc {
	DW_DMA_FC_D_M2M,
	DW_DMA_FC_D_M2P,
	DW_DMA_FC_D_P2M,
	DW_DMA_FC_D_P2P,
	DW_DMA_FC_P_P2M,
	DW_DMA_FC_SP_P2P,
	DW_DMA_FC_P_M2P,
	DW_DMA_FC_DP_P2P,
};

/*
 * Redefine this macro to handle differences between 32- and 64-bit
 * addressing, big vs. little endian, etc.
 */
#define DW_REG(name)		u32 name; u32 __pad_##name
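/*
 * For reference, DW_REG(SAR) below expands to
 *
 *	u32 SAR; u32 __pad_SAR;
 *
 * so each logical register occupies a full 64-bit slot, matching the 8-byte
 * register stride of the controller's memory map.
 */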

/* Hardware register definitions. */
struct dw_dma_chan_regs {
	DW_REG(SAR);		/* Source Address Register */
	DW_REG(DAR);		/* Destination Address Register */
	DW_REG(LLP);		/* Linked List Pointer */
	u32	CTL_LO;		/* Control Register Low */
	u32	CTL_HI;		/* Control Register High */
	DW_REG(SSTAT);
	DW_REG(DSTAT);
	DW_REG(SSTATAR);
	DW_REG(DSTATAR);
	u32	CFG_LO;		/* Configuration Register Low */
	u32	CFG_HI;		/* Configuration Register High */
	DW_REG(SGR);
	DW_REG(DSR);
};

struct dw_dma_irq_regs {
	DW_REG(XFER);
	DW_REG(BLOCK);
	DW_REG(SRC_TRAN);
	DW_REG(DST_TRAN);
	DW_REG(ERROR);
};

struct dw_dma_regs {
	/* per-channel registers */
	struct dw_dma_chan_regs	CHAN[DW_DMA_MAX_NR_CHANNELS];

	/* irq handling */
	struct dw_dma_irq_regs	RAW;		/* r */
	struct dw_dma_irq_regs	STATUS;		/* r (raw & mask) */
	struct dw_dma_irq_regs	MASK;		/* rw (set = irq enabled) */
	struct dw_dma_irq_regs	CLEAR;		/* w (ack, affects "raw") */

	DW_REG(STATUS_INT);			/* r */

	/* software handshaking */
	DW_REG(REQ_SRC);
	DW_REG(REQ_DST);
	DW_REG(SGL_REQ_SRC);
	DW_REG(SGL_REQ_DST);
	DW_REG(LAST_SRC);
	DW_REG(LAST_DST);

	/* miscellaneous */
	DW_REG(CFG);
	DW_REG(CH_EN);
	DW_REG(ID);
	DW_REG(TEST);

	/* iDMA 32-bit support */
	DW_REG(CLASS_PRIORITY0);
	DW_REG(CLASS_PRIORITY1);

	/* optional encoded params, 0x3c8..0x3f7 */
	u32	__reserved;

	/* per-channel configuration registers */
	u32	DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS];
	u32	MULTI_BLK_TYPE;
	u32	MAX_BLK_SIZE;

	/* top-level parameters */
	u32	DW_PARAMS;

	/* component ID */
	u32	COMP_TYPE;
	u32	COMP_VERSION;

	/* iDMA 32-bit support */
	DW_REG(FIFO_PARTITION0);
	DW_REG(FIFO_PARTITION1);

	DW_REG(SAI_ERR);
	DW_REG(GLOBAL_CFG);
};

/* Bitfields in DW_PARAMS */
#define DW_PARAMS_NR_CHAN	8		/* number of channels */
#define DW_PARAMS_NR_MASTER	11		/* number of AHB masters */
#define DW_PARAMS_DATA_WIDTH(n)	(15 + 2 * (n))
#define DW_PARAMS_DATA_WIDTH1	15		/* master 1 data width */
#define DW_PARAMS_DATA_WIDTH2	17		/* master 2 data width */
#define DW_PARAMS_DATA_WIDTH3	19		/* master 3 data width */
#define DW_PARAMS_DATA_WIDTH4	21		/* master 4 data width */
#define DW_PARAMS_EN		28		/* encoded parameters */
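/*
 * A minimal sketch of how DW_PARAMS is typically decoded at probe time when
 * the encoded parameters are present (DW_PARAMS_EN set). Field widths follow
 * the databook; the local variable names are illustrative only:
 *
 *	u32 dw_params = dma_readl(dw, DW_PARAMS);
 *	bool autocfg = dw_params >> DW_PARAMS_EN & 1;
 *
 *	unsigned int nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
 *	unsigned int nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
 *	unsigned int data_width = 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(0) & 3);
 */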

/* Bitfields in DWC_PARAMS */
#define DWC_PARAMS_MBLK_EN	11		/* multi block transfer */
#define DWC_PARAMS_HC_LLP	13		/* LLP hardcoded to zero */
#define DWC_PARAMS_MSIZE	16		/* max group transaction size */
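/*
 * A sketch of how these per-channel parameters are typically consumed when
 * hardware auto-configuration is available; the probe path indexes the
 * DWC_PARAMS[] array in reverse channel order, and the field widths here
 * follow the databook rather than being taken verbatim from the driver:
 *
 *	u32 dwc_params = readl(&__dw_regs(dw)->DWC_PARAMS[r]);
 *
 *	bool has_llp = dwc_params >> DWC_PARAMS_MBLK_EN & 1;
 *	u32 max_msize = dwc_params >> DWC_PARAMS_MSIZE & 7;
 */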

/* burst size */
enum dw_dma_msize {
	DW_DMA_MSIZE_1,
	DW_DMA_MSIZE_4,
	DW_DMA_MSIZE_8,
	DW_DMA_MSIZE_16,
	DW_DMA_MSIZE_32,
	DW_DMA_MSIZE_64,
	DW_DMA_MSIZE_128,
	DW_DMA_MSIZE_256,
};

/* Bitfields in LLP */
#define DWC_LLP_LMS(x)		((x) & 3)	/* list master select */
#define DWC_LLP_LOC(x)		((x) & ~3)	/* next lli */
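/*
 * The low two bits of LLP select the AHB master that fetches the descriptor
 * list; the remaining bits hold the 4-byte-aligned address of the next LLI.
 * A sketch of how descriptors are typically chained, where lms is the
 * DWC_LLP_LMS() encoding of the chosen memory master:
 *
 *	lli_write(prev, llp, desc->txd.phys | lms);
 *	...
 *	channel_writel(dwc, LLP, first->txd.phys | lms);
 */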

/* Bitfields in CTL_LO */
#define DWC_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
#define DWC_CTLL_DST_WIDTH(n)	((n)<<1)	/* bytes per element */
#define DWC_CTLL_SRC_WIDTH(n)	((n)<<4)
#define DWC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
#define DWC_CTLL_DST_DEC	(1<<7)
#define DWC_CTLL_DST_FIX	(2<<7)
#define DWC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
#define DWC_CTLL_SRC_DEC	(1<<9)
#define DWC_CTLL_SRC_FIX	(2<<9)
#define DWC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
#define DWC_CTLL_SRC_MSIZE(n)	((n)<<14)
#define DWC_CTLL_S_GATH_EN	(1 << 17)	/* src gather, !FIX */
#define DWC_CTLL_D_SCAT_EN	(1 << 18)	/* dst scatter, !FIX */
#define DWC_CTLL_FC(n)		((n) << 20)
#define DWC_CTLL_FC_M2M		(0 << 20)	/* mem-to-mem */
#define DWC_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
#define DWC_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
#define DWC_CTLL_FC_P2P		(3 << 20)	/* periph-to-periph */
/* plus 4 transfer types for peripheral-as-flow-controller */
#define DWC_CTLL_DMS(n)		((n)<<23)	/* dst master select */
#define DWC_CTLL_SMS(n)		((n)<<25)	/* src master select */
#define DWC_CTLL_LLP_D_EN	(1 << 27)	/* dest block chain */
#define DWC_CTLL_LLP_S_EN	(1 << 28)	/* src block chain */
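/*
 * A sketch of how CTL_LO is typically assembled for a mem-to-peripheral slave
 * transfer. The width arguments take the encoded value (log2 of the bytes per
 * element), and the flow-control selector comes from enum dw_dma_fc above:
 *
 *	ctllo = DWC_CTLL_INT_EN
 *	      | DWC_CTLL_DST_WIDTH(__ffs(sconfig->dst_addr_width))
 *	      | DWC_CTLL_SRC_WIDTH(src_width)
 *	      | DWC_CTLL_DST_FIX
 *	      | DWC_CTLL_SRC_INC
 *	      | DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 */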

/* Bitfields in CTL_HI */
#define DWC_CTLH_BLOCK_TS_MASK	GENMASK(11, 0)
#define DWC_CTLH_BLOCK_TS(x)	((x) & DWC_CTLH_BLOCK_TS_MASK)
#define DWC_CTLH_DONE		(1 << 12)
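/*
 * BLOCK_TS counts source-width elements, not bytes, so residue accounting
 * roughly converts as in this sketch (width being the encoded source width):
 *
 *	bytes = DWC_CTLH_BLOCK_TS(ctlhi) << width;
 */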

/* Bitfields in CFG_LO */
#define DWC_CFGL_CH_PRIOR_MASK	(0x7 << 5)	/* priority mask */
#define DWC_CFGL_CH_PRIOR(x)	((x) << 5)	/* priority */
#define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
#define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* fifo empty status (used with CH_SUSP) */
#define DWC_CFGL_HS_DST		(1 << 10)	/* handshake w/dst */
#define DWC_CFGL_HS_SRC		(1 << 11)	/* handshake w/src */
#define DWC_CFGL_LOCK_CH_XFER	(0 << 12)	/* scope of LOCK_CH */
#define DWC_CFGL_LOCK_CH_BLOCK	(1 << 12)
#define DWC_CFGL_LOCK_CH_XACT	(2 << 12)
#define DWC_CFGL_LOCK_BUS_XFER	(0 << 14)	/* scope of LOCK_BUS */
#define DWC_CFGL_LOCK_BUS_BLOCK	(1 << 14)
#define DWC_CFGL_LOCK_BUS_XACT	(2 << 14)
#define DWC_CFGL_LOCK_CH	(1 << 15)	/* channel lockout */
#define DWC_CFGL_LOCK_BUS	(1 << 16)	/* busmaster lockout */
#define DWC_CFGL_HS_DST_POL	(1 << 18)	/* dst handshake active low */
#define DWC_CFGL_HS_SRC_POL	(1 << 19)	/* src handshake active low */
#define DWC_CFGL_MAX_BURST(x)	((x) << 20)
#define DWC_CFGL_RELOAD_SAR	(1 << 30)
#define DWC_CFGL_RELOAD_DAR	(1 << 31)

/* Bitfields in CFG_HI */
#define DWC_CFGH_FCMODE		(1 << 0)
#define DWC_CFGH_FIFO_MODE	(1 << 1)
#define DWC_CFGH_PROTCTL(x)	((x) << 2)
#define DWC_CFGH_PROTCTL_DATA	(0 << 2)	/* data access - always set */
#define DWC_CFGH_PROTCTL_PRIV	(1 << 2)	/* privileged -> AHB HPROT[1] */
#define DWC_CFGH_PROTCTL_BUFFER	(2 << 2)	/* bufferable -> AHB HPROT[2] */
#define DWC_CFGH_PROTCTL_CACHE	(4 << 2)	/* cacheable -> AHB HPROT[3] */
#define DWC_CFGH_DS_UPD_EN	(1 << 5)
#define DWC_CFGH_SS_UPD_EN	(1 << 6)
#define DWC_CFGH_SRC_PER(x)	((x) << 7)
#define DWC_CFGH_DST_PER(x)	((x) << 11)
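/*
 * A sketch of how a channel's CFG registers are typically programmed for a
 * hardware-handshaked slave channel; dws carries the request line IDs and
 * handshake polarity from the slave configuration, and protctl here stands in
 * for the platform-provided protection control bits:
 *
 *	cfghi = DWC_CFGH_SRC_PER(dwc->dws.src_id)
 *	      | DWC_CFGH_DST_PER(dwc->dws.dst_id)
 *	      | DWC_CFGH_PROTCTL(protctl);
 *	cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 *	if (dwc->dws.hs_polarity)
 *		cfglo |= DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL;
 *
 *	channel_writel(dwc, CFG_LO, cfglo);
 *	channel_writel(dwc, CFG_HI, cfghi);
 */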

/* Bitfields in SGR */
#define DWC_SGR_SGI(x)		((x) << 0)
#define DWC_SGR_SGC(x)		((x) << 20)

/* Bitfields in DSR */
#define DWC_DSR_DSI(x)		((x) << 0)
#define DWC_DSR_DSC(x)		((x) << 20)

/* Bitfields in CFG */
#define DW_CFG_DMA_EN		(1 << 0)

/* iDMA 32-bit support */

/* burst size */
enum idma32_msize {
	IDMA32_MSIZE_1,
	IDMA32_MSIZE_2,
	IDMA32_MSIZE_4,
	IDMA32_MSIZE_8,
	IDMA32_MSIZE_16,
	IDMA32_MSIZE_32,
};

/* Bitfields in CTL_HI */
#define IDMA32C_CTLH_BLOCK_TS_MASK	GENMASK(16, 0)
#define IDMA32C_CTLH_BLOCK_TS(x)	((x) & IDMA32C_CTLH_BLOCK_TS_MASK)
#define IDMA32C_CTLH_DONE		(1 << 17)

/* Bitfields in CFG_LO */
#define IDMA32C_CFGL_DST_BURST_ALIGN	(1 << 0)	/* dst burst align */
#define IDMA32C_CFGL_SRC_BURST_ALIGN	(1 << 1)	/* src burst align */
#define IDMA32C_CFGL_CH_DRAIN		(1 << 10)	/* drain FIFO */
#define IDMA32C_CFGL_DST_OPT_BL		(1 << 20)	/* optimize dst burst length */
#define IDMA32C_CFGL_SRC_OPT_BL		(1 << 21)	/* optimize src burst length */

/* Bitfields in CFG_HI */
#define IDMA32C_CFGH_SRC_PER(x)		((x) << 0)
#define IDMA32C_CFGH_DST_PER(x)		((x) << 4)
#define IDMA32C_CFGH_RD_ISSUE_THD(x)	((x) << 8)
#define IDMA32C_CFGH_RW_ISSUE_THD(x)	((x) << 18)
#define IDMA32C_CFGH_SRC_PER_EXT(x)	((x) << 28)	/* src peripheral extension */
#define IDMA32C_CFGH_DST_PER_EXT(x)	((x) << 30)	/* dst peripheral extension */

/* Bitfields in FIFO_PARTITION */
#define IDMA32C_FP_PSIZE_CH0(x)		((x) << 0)
#define IDMA32C_FP_PSIZE_CH1(x)		((x) << 13)
#define IDMA32C_FP_UPDATE		(1 << 26)
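/*
 * A sketch of how the iDMA 32-bit FIFO partitioning is typically applied at
 * controller bring-up; psize (bytes reserved per channel) is illustrative,
 * and the same partition value is replicated into both halves of each 64-bit
 * register so all channels get an even split:
 *
 *	u64 part = IDMA32C_FP_PSIZE_CH0(psize)
 *		 | IDMA32C_FP_PSIZE_CH1(psize)
 *		 | IDMA32C_FP_UPDATE;
 *
 *	idma32_writeq(dw, FIFO_PARTITION1, part << 32 | part);
 *	idma32_writeq(dw, FIFO_PARTITION0, part << 32 | part);
 */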

enum dw_dmac_flags {
	DW_DMA_IS_CYCLIC = 0,
	DW_DMA_IS_SOFT_LLP = 1,
	DW_DMA_IS_PAUSED = 2,
	DW_DMA_IS_INITIALIZED = 3,
};

struct dw_dma_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u8				mask;
	u8				priority;
	enum dma_transfer_direction	direction;

	/* software emulation of the LLP transfers */
	struct list_head	*tx_node_active;

	spinlock_t		lock;

	/* these other elements are all protected by lock */
	unsigned long		flags;
	struct list_head	active_list;
	struct list_head	queue;

	unsigned int		descs_allocated;

	/* hardware configuration */
	unsigned int		block_size;
	bool			nollp;
	u32			max_burst;

	/* custom slave configuration */
	struct dw_dma_slave	dws;

	/* configuration passed via .device_config */
	struct dma_slave_config dma_sconfig;
};

static inline struct dw_dma_chan_regs __iomem *
__dwc_regs(struct dw_dma_chan *dwc)
{
	return dwc->ch_regs;
}

#define channel_readl(dwc, name) \
	readl(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
	writel((val), &(__dwc_regs(dwc)->name))
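/*
 * The name argument is pasted in literally as a dw_dma_chan_regs member, so
 * these accessors read like the following sketch, which programs one hardware
 * block from a prepared LLI before the channel enable bit is set:
 *
 *	channel_writel(dwc, SAR, lli_read(desc, sar));
 *	channel_writel(dwc, DAR, lli_read(desc, dar));
 *	channel_writel(dwc, CTL_LO, lli_read(desc, ctllo));
 *	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
 */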

static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct dw_dma_chan, chan);
}

struct dw_dma {
	struct dma_device	dma;
	char			name[20];
	void __iomem		*regs;
	struct dma_pool		*desc_pool;
	struct tasklet_struct	tasklet;

	/* channels */
	struct dw_dma_chan	*chan;
	u8			all_chan_mask;
	u8			in_use;

	/* Channel operations */
	void	(*initialize_chan)(struct dw_dma_chan *dwc);
	void	(*suspend_chan)(struct dw_dma_chan *dwc, bool drain);
	void	(*resume_chan)(struct dw_dma_chan *dwc, bool drain);
	u32	(*prepare_ctllo)(struct dw_dma_chan *dwc);
	void	(*encode_maxburst)(struct dw_dma_chan *dwc, u32 *maxburst);
	u32	(*bytes2block)(struct dw_dma_chan *dwc, size_t bytes,
			       unsigned int width, size_t *len);
	size_t	(*block2bytes)(struct dw_dma_chan *dwc, u32 block, u32 width);

	/* Device operations */
	void	(*set_device_name)(struct dw_dma *dw, int id);
	void	(*disable)(struct dw_dma *dw);
	void	(*enable)(struct dw_dma *dw);

	/* platform data */
	struct dw_dma_platform_data	*pdata;
};

static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
{
	return dw->regs;
}

#define dma_readl(dw, name) \
	readl(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
	writel((val), &(__dw_regs(dw)->name))
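/*
 * A sketch of typical controller-level usage: globally enabling the DMA and,
 * on the disable path, waiting for every channel enable bit to clear:
 *
 *	dma_writel(dw, CFG, DW_CFG_DMA_EN);
 *	...
 *	dma_writel(dw, CFG, 0);
 *	while (dma_readl(dw, CH_EN) & dw->all_chan_mask)
 *		cpu_relax();
 */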

#define idma32_readq(dw, name)				\
	hi_lo_readq(&(__dw_regs(dw)->name))
#define idma32_writeq(dw, name, val)			\
	hi_lo_writeq((val), &(__dw_regs(dw)->name))

#define channel_set_bit(dw, reg, mask) \
	dma_writel(dw, reg, ((mask) << 8) | (mask))
#define channel_clear_bit(dw, reg, mask) \
	dma_writel(dw, reg, ((mask) << 8) | 0)
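/*
 * The channel-control registers (CH_EN, MASK.*, etc.) use a write-enable
 * scheme: the upper byte selects which channel bits to touch and the lower
 * byte carries the new values, so a single channel can be flipped without a
 * read-modify-write. For example:
 *
 *	channel_set_bit(dw, CH_EN, dwc->mask);		// start the channel
 *	channel_clear_bit(dw, MASK.XFER, dwc->mask);	// mask its xfer irq
 */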

static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
{
	return container_of(ddev, struct dw_dma, dma);
}

/* LLI == Linked List Item; a.k.a. DMA block descriptor */
struct dw_lli {
	/* values that are not changed by hardware */
	__le32		sar;
	__le32		dar;
	__le32		llp;		/* chain to next lli */
	__le32		ctllo;
	/* values that may get written back: */
	__le32		ctlhi;
	/* sstat and dstat can snapshot peripheral register state.
	 * silicon config may discard either or both...
	 */
	__le32		sstat;
	__le32		dstat;
};

struct dw_desc {
	/* FIRST values the hardware uses */
	struct dw_lli			lli;

#define lli_set(d, reg, v)		((d)->lli.reg |= cpu_to_le32(v))
#define lli_clear(d, reg, v)		((d)->lli.reg &= ~cpu_to_le32(v))
#define lli_read(d, reg)		le32_to_cpu((d)->lli.reg)
#define lli_write(d, reg, v)		((d)->lli.reg = cpu_to_le32(v))
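/*
 * The hardware reads LLIs in little-endian layout, hence the accessors above.
 * A sketch of how one block of a multi-block transfer is typically filled in
 * and chained to its predecessor:
 *
 *	lli_write(desc, sar, src);
 *	lli_write(desc, dar, dest);
 *	lli_write(desc, ctllo, ctllo);
 *	lli_write(desc, ctlhi, ctlhi);
 *	lli_write(prev, llp, desc->txd.phys | lms);
 *
 * and on the final block the chaining bits are dropped:
 *
 *	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
 */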

	/* THEN values for driver housekeeping */
	struct list_head		desc_node;
	struct list_head		tx_list;
	struct dma_async_tx_descriptor	txd;
	size_t				len;
	size_t				total_len;
	u32				residue;
};

#define to_dw_desc(h)	list_entry(h, struct dw_desc, desc_node)

static inline struct dw_desc *
txd_to_dw_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct dw_desc, txd);
}