// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Marvell International Ltd.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c

#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */
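/*
 * Worked example of the DRCMR() layout above: request line 2 maps to
 * 0x0100 + (2 << 2) = 0x0108, request line 70 maps to
 * 0x1100 + ((70 & 0x3f) << 2) = 0x1118.
 */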

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)		/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;
	struct dma_slave_config slave_config;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)		\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)		\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)		\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)		\
	container_of(dmadev, struct mmp_pdma_device, device)

static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction);

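/* Point the physical channel's DDADR register at the first hardware descriptor. */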
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

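/*
 * Start the physical channel: map its request line to this channel in DRCMR,
 * set or clear the byte-alignment bit in DALGN, then set DCSR_RUN.
 */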
static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

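/*
 * Acknowledge a channel interrupt: return -EAGAIN if DINT shows nothing
 * pending for this channel, otherwise write the DCSR status bits back to
 * clear them and warn if a bus error was flagged.
 */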
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

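/*
 * Per-channel interrupt handler: acknowledge the interrupt and defer
 * completion handling to the channel's tasklet.
 */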
static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

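/*
 * Shared interrupt handler: walk the DINT status word and dispatch each
 * pending bit to the matching physical channel's handler.
 */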
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		/* only handle interrupts belonging to the pdma driver */
		if (i >= pdev->dma_channels)
			break;
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* look up a free physical channel, in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

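/*
 * Unbind the physical channel from this virtual channel: clear its DRCMR
 * mapping and release it under phy_lock so lookup_phy() can hand it out again.
 */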
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/*
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, the irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

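/*
 * Allocate one sw/hw descriptor pair from the channel's dma_pool and set up
 * its async_tx fields; the pool also returns the DMA address used for chaining.
 */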
static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor has its own tx_submit callback */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/*
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function creates a dma pool for descriptor allocation.
 * The irq is requested only when the channel is requested.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

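/*
 * Undo mmp_pdma_alloc_chan_resources(): drop all queued descriptors, destroy
 * the descriptor pool and give back the physical channel.
 */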
static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

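/*
 * Prepare a memory-to-memory transfer: split the copy into a chain of
 * hardware descriptors of at most PDMA_MAX_DESC_BYTES each.
 */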
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

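/*
 * Prepare a slave scatter-gather transfer: each sg entry is chopped into
 * hardware descriptors of at most PDMA_MAX_DESC_BYTES, using the device
 * address cached by mmp_pdma_config_write() for the other end.
 */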
static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	mmp_pdma_config_write(dchan, &chan->slave_config, dir);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

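/*
 * Prepare a cyclic transfer (e.g. for audio): build one descriptor per
 * period, each raising an end interrupt, and close the ring back to the
 * first descriptor.
 */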
static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	mmp_pdma_config_write(dchan, &chan->slave_config, direction);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

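/*
 * Translate a dma_slave_config for the given direction into the DCMD bits
 * and device address used by the prep functions when building descriptors.
 */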
static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	if (direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = direction;
	chan->dev_addr = addr;

	return 0;
}

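/* Cache the slave config; it is applied per direction at prepare time. */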
static int mmp_pdma_config(struct dma_chan *dchan,
			   struct dma_slave_config *cfg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	memcpy(&chan->slave_config, cfg, sizeof(*cfg));
	return 0;
}

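/*
 * Stop the hardware channel, release it and discard every pending and
 * running descriptor on this virtual channel.
 */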
static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	mmp_pdma_free_phy(chan);
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;

	return 0;
}

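/*
 * Compute the residue for a cookie by comparing the channel's current
 * source/target address register against the descriptors still on the
 * running chain.
 */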
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) dma_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct mmp_pdma_desc_sw *sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) u32 curr, residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) bool passed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) bool cyclic = chan->cyclic_first != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) * If the channel does not have a phy pointer anymore, it has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) * been completed. Therefore, its residue is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) if (!chan->phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (chan->dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) curr = readl(chan->phy->base + DTADR(chan->phy->idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) curr = readl(chan->phy->base + DSADR(chan->phy->idx));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) list_for_each_entry(sw, &chan->chain_running, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) u32 start, end, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) if (chan->dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) start = sw->desc.dtadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) start = sw->desc.dsadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) len = sw->desc.dcmd & DCMD_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * 'passed' will be latched once we found the descriptor which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * lies inside the boundaries of the curr pointer. All
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * descriptors that occur in the list _after_ we found that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * partially handled descriptor are still to be processed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * are hence added to the residual bytes counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (passed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) residue += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) } else if (curr >= start && curr <= end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) residue += end - curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) passed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Descriptors that have the ENDIRQEN bit set mark the end of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * transaction chain, and the cookie assigned with it has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * returned previously from mmp_pdma_tx_submit().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * In case we have multiple transactions in the running chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * and the cookie does not match the one the user asked us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * about, reset the state variables and start over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * This logic does not apply to cyclic transactions, where all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * descriptors have the ENDIRQEN bit set, and for which we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * can't have multiple transactions on one channel anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (sw->async_tx.cookie == cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) passed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* We should only get here in case of cyclic transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = dma_cookie_status(dchan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (likely(ret != DMA_ERROR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * mmp_pdma_issue_pending - Issue the DMA start command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * moving descriptors from the pending list to the running list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static void mmp_pdma_issue_pending(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) spin_lock_irqsave(&chan->desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) start_pending_queue(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) spin_unlock_irqrestore(&chan->desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * dma_do_tasklet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * Run the completion callbacks for finished descriptors and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * start the pending list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static void dma_do_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct mmp_pdma_desc_sw *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) LIST_HEAD(chain_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct dmaengine_desc_callback cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
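/*
 * Cyclic transfers only need their period callback invoked here; the
 * descriptor ring keeps running, so nothing is completed or freed.
 */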
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (chan->cyclic_first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) spin_lock_irqsave(&chan->desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) desc = chan->cyclic_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) dmaengine_desc_get_callback(&desc->async_tx, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) spin_unlock_irqrestore(&chan->desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) dmaengine_desc_callback_invoke(&cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* complete running descs, restart pending list, run callbacks, free descs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) spin_lock_irqsave(&chan->desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * move the descriptors to a temporary list so we can drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * the lock during the entire cleanup operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) list_move(&desc->node, &chain_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * Look for the first list entry which has the ENDIRQEN flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * set. That is the descriptor we got an interrupt for, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * complete that transaction and its cookie.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (desc->desc.dcmd & DCMD_ENDIRQEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) dma_cookie_t cookie = desc->async_tx.cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) dma_cookie_complete(&desc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * The hardware is idle and ready for more when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * chain_running list is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) chan->idle = list_empty(&chan->chain_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* Start any pending transactions automatically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) start_pending_queue(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) spin_unlock_irqrestore(&chan->desc_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* Run the callback for each descriptor, in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct dma_async_tx_descriptor *txd = &desc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* Remove from the list of transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /* Run the link descriptor callback function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) dmaengine_desc_get_callback(txd, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) dmaengine_desc_callback_invoke(&cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) dma_pool_free(chan->desc_pool, desc, txd->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static int mmp_pdma_remove(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct mmp_pdma_device *pdev = platform_get_drvdata(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct mmp_pdma_phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int i, irq = 0, irq_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) if (op->dev.of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) of_dma_controller_free(op->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) for (i = 0; i < pdev->dma_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (platform_get_irq(op, i) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) irq_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
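/*
 * Mirror how interrupts were requested in probe(): either one shared
 * IRQ registered against the device, or one IRQ per channel registered
 * against its phy.
 */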
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (irq_num != pdev->dma_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) irq = platform_get_irq(op, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) devm_free_irq(&op->dev, irq, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) for (i = 0; i < pdev->dma_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) phy = &pdev->phy[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) irq = platform_get_irq(op, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) devm_free_irq(&op->dev, irq, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) dma_async_device_unregister(&pdev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct mmp_pdma_phy *phy = &pdev->phy[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct mmp_pdma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (chan == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) phy->idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) phy->base = pdev->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
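/* irq is only non-zero when every channel has its own interrupt line */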
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) IRQF_SHARED, "pdma", phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) dev_err(pdev->dev, "channel request irq failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) spin_lock_init(&chan->desc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) chan->dev = pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) chan->chan.device = &pdev->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) tasklet_setup(&chan->tasklet, dma_do_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) INIT_LIST_HEAD(&chan->chain_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) INIT_LIST_HEAD(&chan->chain_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) /* register the channel with the dmaengine core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) list_add_tail(&chan->chan.device_node, &pdev->device.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static const struct of_device_id mmp_pdma_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) { .compatible = "marvell,pdma-1.0", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct mmp_pdma_device *d = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) chan = dma_get_any_slave_channel(&d->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
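/* The first cell of the DT DMA specifier selects the DRCMR request line */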
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static int mmp_pdma_probe(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct mmp_pdma_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct resource *iores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) int i, ret, irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) int dma_channels = 0, irq_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) const enum dma_slave_buswidth widths =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (!pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) pdev->dev = &op->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) spin_lock_init(&pdev->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) iores = platform_get_resource(op, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) pdev->base = devm_ioremap_resource(pdev->dev, iores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (IS_ERR(pdev->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) return PTR_ERR(pdev->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (of_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) of_property_read_u32(pdev->dev->of_node, "#dma-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) &dma_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) else if (pdata && pdata->dma_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dma_channels = pdata->dma_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) dma_channels = 32; /* default to 32 channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) pdev->dma_channels = dma_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
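/* Count how many per-channel interrupt lines the platform provides */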
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) for (i = 0; i < dma_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (platform_get_irq_optional(op, i) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) irq_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (pdev->phy == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) INIT_LIST_HEAD(&pdev->device.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (irq_num != dma_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* all channels share one irq; demux inside the handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) irq = platform_get_irq(op, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) IRQF_SHARED, "pdma", pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) for (i = 0; i < dma_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ret = mmp_pdma_chan_init(pdev, i, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) pdev->device.dev = &op->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) pdev->device.device_tx_status = mmp_pdma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) pdev->device.device_issue_pending = mmp_pdma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) pdev->device.device_config = mmp_pdma_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) pdev->device.device_terminate_all = mmp_pdma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) pdev->device.src_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) pdev->device.dst_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
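/*
 * Honour a coherent DMA mask already set up by the platform; fall back
 * to a 64-bit mask otherwise.
 */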
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (pdev->dev->coherent_dma_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ret = dma_async_device_register(&pdev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dev_err(pdev->device.dev, "unable to register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (op->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /* Device-tree DMA controller registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) ret = of_dma_controller_register(op->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) mmp_pdma_dma_xlate, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) dev_err(&op->dev, "of_dma_controller_register failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) platform_set_drvdata(op, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) static const struct platform_device_id mmp_pdma_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) { "mmp-pdma", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static struct platform_driver mmp_pdma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .name = "mmp-pdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) .of_match_table = mmp_pdma_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) .id_table = mmp_pdma_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) .probe = mmp_pdma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) .remove = mmp_pdma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
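/*
 * Only claim channels provided by this driver, and remember the
 * caller-supplied DRCMR request line for this channel.
 */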
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (chan->device->dev->driver != &mmp_pdma_driver.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) c->drcmr = *(unsigned int *)param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) module_platform_driver(mmp_pdma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) MODULE_AUTHOR("Marvell International Ltd.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) MODULE_LICENSE("GPL v2");