// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2018 Socionext Inc.
// Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "virt-dma.h"

/* registers common for all channels */
#define UNIPHIER_MDMAC_CMD		0x000	/* issue DMA start/abort */
#define UNIPHIER_MDMAC_CMD_ABORT	BIT(31) /* 1: abort, 0: start */

/* per-channel registers */
#define UNIPHIER_MDMAC_CH_OFFSET	0x100
#define UNIPHIER_MDMAC_CH_STRIDE	0x040
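/* channel n's register block lives at reg_base + CH_OFFSET + n * CH_STRIDE */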

#define UNIPHIER_MDMAC_CH_IRQ_STAT	0x010	/* current hw status (RO) */
#define UNIPHIER_MDMAC_CH_IRQ_REQ	0x014	/* latched STAT (WOC) */
#define UNIPHIER_MDMAC_CH_IRQ_EN	0x018	/* IRQ enable mask */
#define UNIPHIER_MDMAC_CH_IRQ_DET	0x01c	/* REQ & EN (RO) */
#define UNIPHIER_MDMAC_CH_IRQ__ABORT		BIT(13)
#define UNIPHIER_MDMAC_CH_IRQ__DONE		BIT(1)
#define UNIPHIER_MDMAC_CH_SRC_MODE	0x020	/* mode of source */
#define UNIPHIER_MDMAC_CH_DEST_MODE	0x024	/* mode of destination */
#define UNIPHIER_MDMAC_CH_MODE__ADDR_INC	(0 << 4)
#define UNIPHIER_MDMAC_CH_MODE__ADDR_DEC	(1 << 4)
#define UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED	(2 << 4)
#define UNIPHIER_MDMAC_CH_SRC_ADDR	0x028	/* source address */
#define UNIPHIER_MDMAC_CH_DEST_ADDR	0x02c	/* destination address */
#define UNIPHIER_MDMAC_CH_SIZE		0x030	/* transfer bytes */

#define UNIPHIER_MDMAC_SLAVE_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

struct uniphier_mdmac_desc {
	struct virt_dma_desc vd;
	struct scatterlist *sgl;
	unsigned int sg_len;
	unsigned int sg_cur;
	enum dma_transfer_direction dir;
};

struct uniphier_mdmac_chan {
	struct virt_dma_chan vc;
	struct uniphier_mdmac_device *mdev;
	struct uniphier_mdmac_desc *md;
	void __iomem *reg_ch_base;
	unsigned int chan_id;
};

struct uniphier_mdmac_device {
	struct dma_device ddev;
	struct clk *clk;
	void __iomem *reg_base;
	struct uniphier_mdmac_chan channels[];
};

static struct uniphier_mdmac_chan *
to_uniphier_mdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct uniphier_mdmac_chan, vc);
}

static struct uniphier_mdmac_desc *
to_uniphier_mdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct uniphier_mdmac_desc, vd);
}

/* mc->vc.lock must be held by caller */
static struct uniphier_mdmac_desc *
uniphier_mdmac_next_desc(struct uniphier_mdmac_chan *mc)
{
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&mc->vc);
	if (!vd) {
		mc->md = NULL;
		return NULL;
	}

	list_del(&vd->node);

	mc->md = to_uniphier_mdmac_desc(vd);

	return mc->md;
}

/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_handle(struct uniphier_mdmac_chan *mc,
				  struct uniphier_mdmac_desc *md)
{
	struct uniphier_mdmac_device *mdev = mc->mdev;
	struct scatterlist *sg;
	u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__DONE;
	u32 src_mode, src_addr, dest_mode, dest_addr, chunk_size;

	sg = &md->sgl[md->sg_cur];

	if (md->dir == DMA_MEM_TO_DEV) {
		src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
		src_addr = sg_dma_address(sg);
		dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
		dest_addr = 0;
	} else {
		src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED;
		src_addr = 0;
		dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC;
		dest_addr = sg_dma_address(sg);
	}

	chunk_size = sg_dma_len(sg);

	writel(src_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_MODE);
	writel(dest_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_MODE);
	writel(src_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_ADDR);
	writel(dest_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_ADDR);
	writel(chunk_size, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SIZE);

	/* write 1 to clear */
	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_EN);

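	/* kick this channel; the ABORT bit stays clear, so this is a start */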
	writel(BIT(mc->chan_id), mdev->reg_base + UNIPHIER_MDMAC_CMD);
}

/* mc->vc.lock must be held by caller */
static void uniphier_mdmac_start(struct uniphier_mdmac_chan *mc)
{
	struct uniphier_mdmac_desc *md;

	md = uniphier_mdmac_next_desc(mc);
	if (md)
		uniphier_mdmac_handle(mc, md);
}

/* mc->vc.lock must be held by caller */
static int uniphier_mdmac_abort(struct uniphier_mdmac_chan *mc)
{
	struct uniphier_mdmac_device *mdev = mc->mdev;
	u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__ABORT;
	u32 val;

	/* write 1 to clear */
	writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	writel(UNIPHIER_MDMAC_CMD_ABORT | BIT(mc->chan_id),
	       mdev->reg_base + UNIPHIER_MDMAC_CMD);

	/*
	 * Abort should be accepted soon. We poll the bit here instead of
	 * waiting for the interrupt.
	 */
	return readl_poll_timeout(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ,
				  val, val & irq_flag, 0, 20);
}

static irqreturn_t uniphier_mdmac_interrupt(int irq, void *dev_id)
{
	struct uniphier_mdmac_chan *mc = dev_id;
	struct uniphier_mdmac_desc *md;
	irqreturn_t ret = IRQ_HANDLED;
	u32 irq_stat;

	spin_lock(&mc->vc.lock);

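	/* DET is the read-only intersection of the latched REQ bits and the EN mask */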
	irq_stat = readl(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_DET);

	/*
	 * Some channels share a single interrupt line. If the IRQ status is 0,
	 * this interrupt was probably raised by a different channel.
	 */
	if (!irq_stat) {
		ret = IRQ_NONE;
		goto out;
	}

	/* write 1 to clear */
	writel(irq_stat, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ);

	/*
	 * The UNIPHIER_MDMAC_CH_IRQ__DONE interrupt is asserted even when the
	 * DMA is aborted. To distinguish normal completion from an abort,
	 * check mc->md: if it is NULL, we are aborting.
	 */
	md = mc->md;
	if (!md)
		goto out;

	md->sg_cur++;

	if (md->sg_cur >= md->sg_len) {
		vchan_cookie_complete(&md->vd);
		md = uniphier_mdmac_next_desc(mc);
		if (!md)
			goto out;
	}

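	/* program the next chunk: either of this descriptor or of the next one */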
	uniphier_mdmac_handle(mc, md);

out:
	spin_unlock(&mc->vc.lock);

	return ret;
}

static void uniphier_mdmac_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static struct dma_async_tx_descriptor *
uniphier_mdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			     unsigned int sg_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_desc *md;

	if (!is_slave_direction(direction))
		return NULL;

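	/* prep hooks may run in atomic context, so sleeping allocations are not allowed */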
	md = kzalloc(sizeof(*md), GFP_NOWAIT);
	if (!md)
		return NULL;

	md->sgl = sgl;
	md->sg_len = sg_len;
	md->dir = direction;

	return vchan_tx_prep(vc, &md->vd, flags);
}

static int uniphier_mdmac_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
	unsigned long flags;
	int ret = 0;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);

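	/* detach and abort the in-flight descriptor (if any), then reap the queue */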
	if (mc->md) {
		vchan_terminate_vdesc(&mc->md->vd);
		mc->md = NULL;
		ret = uniphier_mdmac_abort(mc);
	}
	vchan_get_all_descriptors(vc, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);

	return ret;
}

static void uniphier_mdmac_synchronize(struct dma_chan *chan)
{
	vchan_synchronize(to_virt_chan(chan));
}

static enum dma_status uniphier_mdmac_tx_status(struct dma_chan *chan,
						dma_cookie_t cookie,
						struct dma_tx_state *txstate)
{
	struct virt_dma_chan *vc;
	struct virt_dma_desc *vd;
	struct uniphier_mdmac_chan *mc;
	struct uniphier_mdmac_desc *md = NULL;
	enum dma_status stat;
	unsigned long flags;
	int i;

	stat = dma_cookie_status(chan, cookie, txstate);
	/* Return immediately if we do not need to compute the residue. */
	if (stat == DMA_COMPLETE || !txstate)
		return stat;

	vc = to_virt_chan(chan);

	spin_lock_irqsave(&vc->lock, flags);

	mc = to_uniphier_mdmac_chan(vc);

	if (mc->md && mc->md->vd.tx.cookie == cookie) {
		/* residue from the in-flight chunk */
		txstate->residue = readl(mc->reg_ch_base +
					 UNIPHIER_MDMAC_CH_SIZE);
		md = mc->md;
	}

	if (!md) {
		vd = vchan_find_desc(vc, cookie);
		if (vd)
			md = to_uniphier_mdmac_desc(vd);
	}

	if (md) {
		/* residue from the queued chunks */
		for (i = md->sg_cur; i < md->sg_len; i++)
			txstate->residue += sg_dma_len(&md->sgl[i]);
	}

	spin_unlock_irqrestore(&vc->lock, flags);

	return stat;
}

static void uniphier_mdmac_issue_pending(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);

	if (vchan_issue_pending(vc) && !mc->md)
		uniphier_mdmac_start(mc);

	spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_mdmac_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_uniphier_mdmac_desc(vd));
}

static int uniphier_mdmac_chan_init(struct platform_device *pdev,
				    struct uniphier_mdmac_device *mdev,
				    int chan_id)
{
	struct device *dev = &pdev->dev;
	struct uniphier_mdmac_chan *mc = &mdev->channels[chan_id];
	char *irq_name;
	int irq, ret;

	irq = platform_get_irq(pdev, chan_id);
	if (irq < 0)
		return irq;

	irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d",
				  chan_id);
	if (!irq_name)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, uniphier_mdmac_interrupt,
			       IRQF_SHARED, irq_name, mc);
	if (ret)
		return ret;

	mc->mdev = mdev;
	mc->reg_ch_base = mdev->reg_base + UNIPHIER_MDMAC_CH_OFFSET +
			  UNIPHIER_MDMAC_CH_STRIDE * chan_id;
	mc->chan_id = chan_id;
	mc->vc.desc_free = uniphier_mdmac_desc_free;
	vchan_init(&mc->vc, &mdev->ddev);

	return 0;
}

static int uniphier_mdmac_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_mdmac_device *mdev;
	struct dma_device *ddev;
	int nr_chans, ret, i;

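	/* one IRQ line per channel, so the IRQ count doubles as the channel count */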
	nr_chans = platform_irq_count(pdev);
	if (nr_chans < 0)
		return nr_chans;

	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
			    GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdev->reg_base))
		return PTR_ERR(mdev->reg_base);

	mdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(mdev->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(mdev->clk);
	}

	ret = clk_prepare_enable(mdev->clk);
	if (ret)
		return ret;

	ddev = &mdev->ddev;
	ddev->dev = dev;
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	ddev->src_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
	ddev->dst_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS;
	ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	ddev->device_free_chan_resources = uniphier_mdmac_free_chan_resources;
	ddev->device_prep_slave_sg = uniphier_mdmac_prep_slave_sg;
	ddev->device_terminate_all = uniphier_mdmac_terminate_all;
	ddev->device_synchronize = uniphier_mdmac_synchronize;
	ddev->device_tx_status = uniphier_mdmac_tx_status;
	ddev->device_issue_pending = uniphier_mdmac_issue_pending;
	INIT_LIST_HEAD(&ddev->channels);

	for (i = 0; i < nr_chans; i++) {
		ret = uniphier_mdmac_chan_init(pdev, mdev, i);
		if (ret)
			goto disable_clk;
	}

	ret = dma_async_device_register(ddev);
	if (ret)
		goto disable_clk;

	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id,
					 ddev);
	if (ret)
		goto unregister_dmac;

	platform_set_drvdata(pdev, mdev);

	return 0;

unregister_dmac:
	dma_async_device_unregister(ddev);
disable_clk:
	clk_disable_unprepare(mdev->clk);

	return ret;
}

static int uniphier_mdmac_remove(struct platform_device *pdev)
{
	struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev);
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * still be holding the descriptor that was in flight at that moment.
	 * Terminate it to make sure the hardware is no longer running, then
	 * free the channel resources once again to avoid a memory leak.
	 */
	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		uniphier_mdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->ddev);
	clk_disable_unprepare(mdev->clk);

	return 0;
}

static const struct of_device_id uniphier_mdmac_match[] = {
	{ .compatible = "socionext,uniphier-mio-dmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_mdmac_match);

static struct platform_driver uniphier_mdmac_driver = {
	.probe = uniphier_mdmac_probe,
	.remove = uniphier_mdmac_remove,
	.driver = {
		.name = "uniphier-mio-dmac",
		.of_match_table = uniphier_mdmac_match,
	},
};
module_platform_driver(uniphier_mdmac_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier MIO DMAC driver");
MODULE_LICENSE("GPL v2");