// SPDX-License-Identifier: GPL-2.0
/*
 * External DMA controller driver for UniPhier SoCs
 * Copyright 2019 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

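/* Each channel occupies its own 0x100-byte window of the register space. */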
#define XDMAC_CH_WIDTH		0x100

#define XDMAC_TFA		0x08
#define XDMAC_TFA_MCNT_MASK	GENMASK(23, 16)
#define XDMAC_TFA_MASK		GENMASK(5, 0)
#define XDMAC_SADM		0x10
#define XDMAC_SADM_STW_MASK	GENMASK(25, 24)
#define XDMAC_SADM_SAM		BIT(4)
#define XDMAC_SADM_SAM_FIXED	XDMAC_SADM_SAM
#define XDMAC_SADM_SAM_INC	0
#define XDMAC_DADM		0x14
#define XDMAC_DADM_DTW_MASK	XDMAC_SADM_STW_MASK
#define XDMAC_DADM_DAM		XDMAC_SADM_SAM
#define XDMAC_DADM_DAM_FIXED	XDMAC_SADM_SAM_FIXED
#define XDMAC_DADM_DAM_INC	XDMAC_SADM_SAM_INC
#define XDMAC_EXSAD		0x18
#define XDMAC_EXDAD		0x1c
#define XDMAC_SAD		0x20
#define XDMAC_DAD		0x24
#define XDMAC_ITS		0x28
#define XDMAC_ITS_MASK		GENMASK(25, 0)
#define XDMAC_TNUM		0x2c
#define XDMAC_TNUM_MASK		GENMASK(15, 0)
#define XDMAC_TSS		0x30
#define XDMAC_TSS_REQ		BIT(0)
#define XDMAC_IEN		0x34
#define XDMAC_IEN_ERRIEN	BIT(1)
#define XDMAC_IEN_ENDIEN	BIT(0)
#define XDMAC_STAT		0x40
#define XDMAC_STAT_TENF		BIT(0)
#define XDMAC_IR		0x44
#define XDMAC_IR_ERRF		BIT(1)
#define XDMAC_IR_ENDF		BIT(0)
#define XDMAC_ID		0x48
#define XDMAC_ID_ERRIDF		BIT(1)
#define XDMAC_ID_ENDIDF		BIT(0)

#define XDMAC_MAX_CHANS		16
#define XDMAC_INTERVAL_CLKS	20
#define XDMAC_MAX_WORDS		XDMAC_TNUM_MASK

/* mask off the low bits to keep the maximum transfer size aligned */
#define XDMAC_MAX_WORD_SIZE	(XDMAC_ITS_MASK & ~GENMASK(3, 0))
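/*
 * With XDMAC_ITS_MASK = GENMASK(25, 0) = 0x3ffffff, this evaluates to
 * 0x3fffff0, i.e. the largest 16-byte-aligned value the ITS register
 * can hold.
 */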

#define UNIPHIER_XDMAC_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
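/*
 * Assuming the standard dma_slave_buswidth enum values (1, 2, 4 and 8),
 * the mask above expands to BIT(1) | BIT(2) | BIT(4) | BIT(8) = 0x116.
 */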

struct uniphier_xdmac_desc_node {
	dma_addr_t src;
	dma_addr_t dst;
	u32 burst_size;
	u32 nr_burst;
};

struct uniphier_xdmac_desc {
	struct virt_dma_desc vd;

	unsigned int nr_node;
	unsigned int cur_node;
	enum dma_transfer_direction dir;
	struct uniphier_xdmac_desc_node nodes[];
};

struct uniphier_xdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_xdmac_device *xdev;
	struct uniphier_xdmac_desc *xd;
	void __iomem *reg_ch_base;
	struct dma_slave_config sconfig;
	int id;
	unsigned int req_factor;
};

struct uniphier_xdmac_device {
	struct dma_device ddev;
	void __iomem *reg_base;
	int nr_chans;
	struct uniphier_xdmac_chan channels[];
};

static struct uniphier_xdmac_chan *
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_xdmac_chan, vc);
}

static struct uniphier_xdmac_desc *
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_xdmac_desc, vd);
}

/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&xc->vc);
	if (!vd)
		return NULL;

	list_del(&vd->node);

	return to_uniphier_xdmac_desc(vd);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
				      struct uniphier_xdmac_desc *xd)
{
	u32 src_mode, src_width;
	u32 dst_mode, dst_width;
	dma_addr_t src_addr, dst_addr;
	u32 val, its, tnum;
	enum dma_slave_buswidth buswidth;

	src_addr = xd->nodes[xd->cur_node].src;
	dst_addr = xd->nodes[xd->cur_node].dst;
	its = xd->nodes[xd->cur_node].burst_size;
	tnum = xd->nodes[xd->cur_node].nr_burst;

	/*
	 * The bus width on the MEM side must be 4 or 8 bytes; this does
	 * not affect the DEV-side width or the transfer size.
	 */
	if (xd->dir == DMA_DEV_TO_MEM) {
		src_mode = XDMAC_SADM_SAM_FIXED;
		buswidth = xc->sconfig.src_addr_width;
	} else {
		src_mode = XDMAC_SADM_SAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

	if (xd->dir == DMA_MEM_TO_DEV) {
		dst_mode = XDMAC_DADM_DAM_FIXED;
		buswidth = xc->sconfig.dst_addr_width;
	} else {
		dst_mode = XDMAC_DADM_DAM_INC;
		buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
	}
	dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));
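	/*
	 * The STW/DTW fields hold log2 of the bus width in bytes: for
	 * example, an 8-byte width encodes as __ffs(8) = 3.
	 */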

	/* setup transfer factor */
	val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
	val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
	writel(val, xc->reg_ch_base + XDMAC_TFA);

	/* setup the channel */
	writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
	writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

	writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
	writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

	src_mode |= src_width;
	dst_mode |= dst_width;
	writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
	writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

	writel(its, xc->reg_ch_base + XDMAC_ITS);
	writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

	/* enable interrupt */
	writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
	       xc->reg_ch_base + XDMAC_IEN);

	/* start XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val |= XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);
}

/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
	u32 val;

	/* disable interrupt */
	val = readl(xc->reg_ch_base + XDMAC_IEN);
	val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
	writel(val, xc->reg_ch_base + XDMAC_IEN);

	/* stop XDMAC */
	val = readl(xc->reg_ch_base + XDMAC_TSS);
	val &= ~XDMAC_TSS_REQ;
	writel(val, xc->reg_ch_base + XDMAC_TSS);

	/* wait up to 1ms (polling every 100us) for the transfer to stop */
	return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
					 !(val & XDMAC_STAT_TENF), 100, 1000);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
	struct uniphier_xdmac_desc *xd;

	xd = uniphier_xdmac_next_desc(xc);
	if (xd)
		uniphier_xdmac_chan_start(xc, xd);

	/* track the descriptor on the channel even when xd is NULL */
	xc->xd = xd;
}

static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
	u32 stat;
	int ret;

	spin_lock(&xc->vc.lock);

	stat = readl(xc->reg_ch_base + XDMAC_ID);

	if (stat & XDMAC_ID_ERRIDF) {
		ret = uniphier_xdmac_chan_stop(xc);
		if (ret)
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error; failed to stop the channel\n");
		else
			dev_err(xc->xdev->ddev.dev,
				"DMA transfer error\n");

	} else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
		xc->xd->cur_node++;
		if (xc->xd->cur_node >= xc->xd->nr_node) {
			vchan_cookie_complete(&xc->xd->vd);
			uniphier_xdmac_start(xc);
		} else {
			uniphier_xdmac_chan_start(xc, xc->xd);
		}
	}

	/* write bits to clear */
	writel(stat, xc->reg_ch_base + XDMAC_IR);

	spin_unlock(&xc->vc.lock);
}

static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
{
	struct uniphier_xdmac_device *xdev = dev_id;
	int i;

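	/* a single shared interrupt line serves all channels; poll each one */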
	for (i = 0; i < xdev->nr_chans; i++)
		uniphier_xdmac_chan_irq(&xdev->channels[i]);

	return IRQ_HANDLED;
}

static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_desc *xd;
	unsigned int nr;
	size_t burst_size, tlen;
	int i;

	if (len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
		return NULL;

	nr = 1 + len / XDMAC_MAX_WORD_SIZE;
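	/*
	 * A sketch of the resulting split (hypothetical length): for
	 * len = 0x4000000, nr = 2; node 0 then carries one burst of
	 * XDMAC_MAX_WORD_SIZE (0x3fffff0) bytes and node 1 carries the
	 * remaining 0x10 bytes in a single burst.
	 */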

	xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
	if (!xd)
		return NULL;

	/* stop early once len is consumed to avoid a zero burst_size */
	for (i = 0; i < nr && len; i++) {
		burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
		xd->nodes[i].src = src;
		xd->nodes[i].dst = dst;
		xd->nodes[i].burst_size = burst_size;
		xd->nodes[i].nr_burst = len / burst_size;
		tlen = rounddown(len, burst_size);
		src += tlen;
		dst += tlen;
		len -= tlen;
	}

	xd->dir = DMA_MEM_TO_MEM;
	xd->nr_node = i;	/* number of nodes actually filled (may be < nr) */
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}

static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	struct uniphier_xdmac_desc *xd;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 maxburst;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		buswidth = xc->sconfig.src_addr_width;
		maxburst = xc->sconfig.src_maxburst;
	} else {
		buswidth = xc->sconfig.dst_addr_width;
		maxburst = xc->sconfig.dst_maxburst;
	}

	if (!maxburst)
		maxburst = 1;
	if (maxburst > xc->xdev->ddev.max_burst) {
		dev_err(xc->xdev->ddev.dev,
			"Exceeded the maximum number of burst words\n");
		return NULL;
	}

	xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
	if (!xd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
			? xc->sconfig.src_addr : sg_dma_address(sg);
		xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
			? xc->sconfig.dst_addr : sg_dma_address(sg);
		xd->nodes[i].burst_size = maxburst * buswidth;
		xd->nodes[i].nr_burst =
			sg_dma_len(sg) / xd->nodes[i].burst_size;

		/*
		 * A transfer whose size is not a multiple of the unit
		 * size (the number of burst words * bus width) is not
		 * allowed, because the driver has no way to transfer
		 * the residue. For example, with maxburst = 4 and a
		 * 4-byte bus width, every segment length must be a
		 * multiple of 16 bytes. To transfer an arbitrary size,
		 * 'src_maxburst' or 'dst_maxburst' in dma_slave_config
		 * must be set to 1.
		 */
		if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
			dev_err(xc->xdev->ddev.dev,
				"Unaligned transfer size: %u\n",
				sg_dma_len(sg));
			kfree(xd);
			return NULL;
		}

		if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
			dev_err(xc->xdev->ddev.dev,
				"Exceeded the maximum transfer size\n");
			kfree(xd);
			return NULL;
		}
	}

	xd->dir = direction;
	xd->nr_node = sg_len;
	xd->cur_node = 0;

	return vchan_tx_prep(vc, &xd->vd, flags);
}

static int uniphier_xdmac_slave_config(struct dma_chan *chan,
				       struct dma_slave_config *config)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

	memcpy(&xc->sconfig, config, sizeof(*config));

	return 0;
}
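
/*
 * A minimal consumer-side sketch (hypothetical addresses and values),
 * showing the dma_slave_config fields this driver actually consumes:
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_MEM_TO_DEV,
 *		.dst_addr       = 0x54006000,	// hypothetical device FIFO
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst   = 1,		// 1 permits arbitrary lengths
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */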

static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

	if (xc->xd) {
		vchan_terminate_vdesc(&xc->xd->vd);
		xc->xd = NULL;
		ret = uniphier_xdmac_chan_stop(xc);
	}

	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}

static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !xc->xd)
		uniphier_xdmac_start(xc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_xdmac_desc(vd));
}

static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
				     int ch)
{
	struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

	xc->xdev = xdev;
	xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
	xc->vc.desc_free = uniphier_xdmac_desc_free;

	vchan_init(&xc->vc, &xdev->ddev);
}

static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
{
	struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_chans)
		return NULL;

	xdev->channels[chan_id].id = chan_id;
	xdev->channels[chan_id].req_factor = dma_spec->args[1];

	return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
}
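
/*
 * A device-tree consumer sketch (hypothetical cell values): with
 * "dmas = <&xdmac 4 17>;", args[0] = 4 selects channel 4 and
 * args[1] = 17 becomes that channel's request factor (req_factor).
 */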

static int uniphier_xdmac_probe(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev;
	struct device *dev = &pdev->dev;
	struct dma_device *ddev;
	int irq;
	u32 nr_chans;
	int i, ret;

	if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
		return -EINVAL;
	if (nr_chans > XDMAC_MAX_CHANS)
		nr_chans = XDMAC_MAX_CHANS;

	xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->nr_chans = nr_chans;
	xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg_base))
		return PTR_ERR(xdev->reg_base);

	ddev = &xdev->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			   BIT(DMA_MEM_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ddev->max_burst = XDMAC_MAX_WORDS;
	ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
	ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
	ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
	ddev->device_config = uniphier_xdmac_slave_config;
	ddev->device_terminate_all = uniphier_xdmac_terminate_all;
	ddev->device_synchronize = uniphier_xdmac_synchronize;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = uniphier_xdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++)
		uniphier_xdmac_chan_init(xdev, i);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
			       IRQF_SHARED, "xdmac", xdev);
	if (ret) {
		dev_err(dev, "Failed to request IRQ\n");
		return ret;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA device\n");
		return ret;
	}

	ret = of_dma_controller_register(dev->of_node,
					 of_dma_uniphier_xlate, xdev);
	if (ret) {
		dev_err(dev, "Failed to register XDMA controller\n");
		goto out_unregister_dmac;
	}

	platform_set_drvdata(pdev, xdev);

	dev_info(dev, "UniPhier XDMAC driver (%u channels)\n", nr_chans);

	return 0;

out_unregister_dmac:
	dma_async_device_unregister(ddev);

	return ret;
}

static int uniphier_xdmac_remove(struct platform_device *pdev)
{
	struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
	struct dma_device *ddev = &xdev->ddev;
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by
	 * the ->device_free_chan_resources() hook. However, each channel
	 * might still be holding one descriptor that was in flight at that
	 * moment. Terminate it to make sure the hardware is no longer
	 * running, then free the channel resources once again to avoid a
	 * memory leak.
	 */
	list_for_each_entry(chan, &ddev->channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(ddev);

	return 0;
}

static const struct of_device_id uniphier_xdmac_match[] = {
	{ .compatible = "socionext,uniphier-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);

static struct platform_driver uniphier_xdmac_driver = {
	.probe = uniphier_xdmac_probe,
	.remove = uniphier_xdmac_remove,
	.driver = {
		.name = "uniphier-xdmac",
		.of_match_table = uniphier_xdmac_match,
	},
};
module_platform_driver(uniphier_xdmac_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
MODULE_LICENSE("GPL v2");