// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

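/*
 * Register map: per-channel control/status (DCSR), the shared alignment
 * (DALGN) and interrupt-pending (DINT) registers, and the per-channel
 * descriptor registers DDADR/DSADR/DTADR/DCMD holding the next
 * descriptor address, source, target and command of the current
 * transfer.
 */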
#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff		/* length mask (max = 8K - 1) */

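/*
 * A transfer is carved into hardware descriptors of at most
 * PDMA_MAX_DESC_BYTES each: the DCMD length limit (8K - 1) rounded
 * down to a multiple of 1 << PDMA_ALIGNMENT bytes.
 */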
#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;

	wait_queue_head_t	wq_state;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			**dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

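/*
 * DRCMR request-to-channel mapping registers: request lines below 64
 * are mapped at offset 0x100, higher lines at offset 0x1000, each
 * register being 4 bytes wide.
 */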
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

static bool pxad_filter_fn(struct dma_chan *chan, void *param);

/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int requester_chan_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int descriptors_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int chan_state_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int state_show(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(chan_state);
DEFINE_SHOW_ATTRIBUTE(descriptors);
DEFINE_SHOW_ATTRIBUTE(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
	debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
	debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);

	return chan;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		return;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);

	debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);

	for (i = 0; i < pdev->nr_chans; i++)
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

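/*
 * phy_enable(): map the channel's request line to this phy through
 * DRCMR, reflect the transfer alignment in the phy's DALGN bit, then
 * start the channel by setting RUN plus the interrupt enables in DCSR.
 */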
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}

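/*
 * The last hardware descriptor of a transfer is an "updater": a 4-byte
 * dummy copy whose source is the updater's own ddadr field (holding
 * DDADR_STOP) and whose target is its own dtadr field, 8 bytes further.
 * While the transfer is pending, dtadr == dsadr + 8; once the updater
 * has run, dtadr has been overwritten, which is what is_desc_completed()
 * tests to tell a finished transfer from a merely stopped channel.
 */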
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}

static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}

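/*
 * clear_chan_irq(): if DINT does not flag this phy, the interrupt is not
 * ours and PXA_DCSR_RUN is returned as a sentinel. Otherwise the current
 * DCSR value is written back to acknowledge the pending interrupt bits,
 * and the status is returned with RUN masked out.
 */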
static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	unsigned long flags;
	bool vd_completed;
	dma_cookie_t last_started = 0;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;

	chan->drcmr = U32_MAX;
	chan->prio = PXAD_PRIO_LOWEST;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

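/*
 * pxad_alloc_desc(): allocate a software descriptor plus nb_hw_desc
 * hardware descriptors from the channel's dma pool. Hardware descriptors
 * are linked through their ddadr field as they are allocated, and the
 * bus address of the first one is kept in sw_desc->first so the whole
 * chain can later be handed to the phy or freed.
 */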
static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

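/*
 * pxad_tx_submit(): finalize the transfer with its updater descriptor,
 * then either hot chain it behind a still-running transfer (moving it
 * straight to the issued list) or append it to the submitted list,
 * cold chaining it to the previously submitted descriptor when no new
 * misalignment is introduced.
 */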
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) struct virt_dma_chan *vc = to_virt_chan(tx->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) struct pxad_chan *chan = to_pxad_chan(&vc->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct virt_dma_desc *vd_chained = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) *vd = container_of(tx, struct virt_dma_desc, tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) set_updater_desc(to_pxad_sw_desc(vd), tx->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) spin_lock_irqsave(&vc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) cookie = dma_cookie_assign(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) list_move_tail(&vd->node, &vc->desc_issued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) "%s(): txd %p[%x]: submitted (hot linked)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) __func__, vd, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * Fall back to placing the tx in the submitted queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) if (!list_empty(&vc->desc_submitted)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) vd_chained = list_entry(vc->desc_submitted.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct virt_dma_desc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * Only chain the descriptors if no new misalignment is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * introduced. If chaining would introduce a new misalignment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * let the channel stop and be relaunched in misaligned mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * from the irq handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) pxad_desc_chain(vd_chained, vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) vd_chained = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) "%s(): txd %p[%x]: submitted (%s linked)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) __func__, vd, cookie, vd_chained ? "cold" : "not");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) list_move_tail(&vd->node, &vc->desc_submitted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) spin_unlock_irqrestore(&vc->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
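/*
 * Move submitted descriptors to the issued list and start the channel:
 * either hot-chain the first descriptor onto a transfer that is still
 * running, or (re)launch the channel with it.
 */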
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) static void pxad_issue_pending(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct virt_dma_desc *vd_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) spin_lock_irqsave(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (list_empty(&chan->vc.desc_submitted))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) vd_first = list_first_entry(&chan->vc.desc_submitted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct virt_dma_desc, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) "%s(): txd %p[%x]\n", __func__, vd_first, vd_first->tx.cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) vchan_issue_pending(&chan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (!pxad_try_hotchain(&chan->vc, vd_first))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) spin_unlock_irqrestore(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
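/*
 * Common tail of the prep_* callbacks: initialize the virtual
 * descriptor node, wrap it with vchan_tx_prep() and route tx_submit to
 * pxad_tx_submit().
 */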
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static inline struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) unsigned long tx_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct dma_async_tx_descriptor *tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) INIT_LIST_HEAD(&vd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) tx = vchan_tx_prep(vc, vd, tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) tx->tx_submit = pxad_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) "%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) vc, vd, vd->tx.cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) tx_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
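/*
 * Translate the channel's slave configuration into DCMD bits and fixed
 * device addresses for the given direction. Flow control is only
 * requested when the channel's DRCMR requestor number does not exceed
 * the number of requestors exposed by the controller.
 */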
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) static void pxad_get_config(struct pxad_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) u32 *dcmd, u32 *dev_src, u32 *dev_dst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) u32 maxburst = 0, dev_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) *dcmd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) maxburst = chan->cfg.src_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) width = chan->cfg.src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) dev_addr = chan->cfg.src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *dev_src = dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) *dcmd |= PXA_DCMD_INCTRGADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (chan->drcmr <= pdev->nr_requestors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *dcmd |= PXA_DCMD_FLOWSRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) maxburst = chan->cfg.dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) width = chan->cfg.dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dev_addr = chan->cfg.dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) *dev_dst = dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) *dcmd |= PXA_DCMD_INCSRCADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (chan->drcmr <= pdev->nr_requestors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *dcmd |= PXA_DCMD_FLOWTRG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (dir == DMA_MEM_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) PXA_DCMD_INCSRCADDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) "%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) __func__, dev_addr, maxburst, width, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) *dcmd |= PXA_DCMD_WIDTH1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) *dcmd |= PXA_DCMD_WIDTH2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) *dcmd |= PXA_DCMD_WIDTH4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (maxburst == 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) *dcmd |= PXA_DCMD_BURST8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) else if (maxburst == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) *dcmd |= PXA_DCMD_BURST16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) else if (maxburst == 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) *dcmd |= PXA_DCMD_BURST32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
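/*
 * Prepare a memory-to-memory copy: the length is split into hardware
 * descriptors of at most PDMA_MAX_DESC_BYTES each, plus one trailing
 * updater descriptor. The transfer is flagged as misaligned if either
 * address is not aligned to 1 << PDMA_ALIGNMENT.
 */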
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) pxad_prep_memcpy(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) dma_addr_t dma_dst, dma_addr_t dma_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct pxad_desc_sw *sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct pxad_desc_hw *hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) u32 dcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) unsigned int i, nb_desc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) size_t copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!dchan || !len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) "%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) __func__, (unsigned long)dma_dst, (unsigned long)dma_src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (!sw_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) sw_desc->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) sw_desc->misaligned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) hw_desc = sw_desc->hw_desc[i++];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) hw_desc->dsadr = dma_src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) hw_desc->dtadr = dma_dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) len -= copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) dma_src += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) dma_dst += copy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } while (len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) set_updater_desc(sw_desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
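/*
 * Prepare a slave scatter-gather transfer: each scatterlist entry is
 * covered by one or more hardware descriptors of at most
 * PDMA_MAX_DESC_BYTES, with the device side address taken from the
 * slave configuration.
 */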
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) unsigned int sg_len, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct pxad_desc_sw *sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) size_t len, avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) u32 dcmd, dsadr = 0, dtadr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) unsigned int nb_desc = 0, i, j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (!sgl || !sg_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) "%s(): dir=%d flags=%lx\n", __func__, dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) for_each_sg(sgl, sg, sg_len, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) if (!sw_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) dma = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) avail = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) sw_desc->len += avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (dma & 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) sw_desc->misaligned = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) sw_desc->hw_desc[j]->dcmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) dcmd | (PXA_DCMD_LENGTH & len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) dma += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) avail -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) } while (avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) set_updater_desc(sw_desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
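/*
 * Prepare a cyclic transfer: one hardware descriptor per period, each
 * raising an end-of-transfer interrupt. The buffer length must be a
 * multiple of period_len, which in turn must fit in one descriptor
 * (PDMA_MAX_DESC_BYTES) and respect the controller alignment.
 */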
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) pxad_prep_dma_cyclic(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) dma_addr_t buf_addr, size_t len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) enum dma_transfer_direction dir, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct pxad_desc_sw *sw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct pxad_desc_hw **phw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) u32 dcmd, dsadr = 0, dtadr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) unsigned int nb_desc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (!dchan || !len || !period_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) dev_err(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) "Unsupported direction for cyclic DMA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /* the buffer length must be a multiple of period_len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) nb_desc *= DIV_ROUND_UP(len, period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (!sw_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) sw_desc->cyclic = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) sw_desc->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) phw_desc = sw_desc->hw_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) dma = buf_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) phw_desc[0]->dsadr = dsadr ? dsadr : dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) phw_desc[0]->dtadr = dtadr ? dtadr : dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) phw_desc[0]->dcmd = dcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) phw_desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) dma += period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) len -= period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) } while (len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) set_updater_desc(sw_desc, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static int pxad_config(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct dma_slave_config *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) chan->cfg = *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
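/*
 * Abort all transfers on the channel: collect every descriptor known
 * to the virt-dma core, disable and release the phy if one is
 * attached, then free the collected descriptors outside the lock.
 */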
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static int pxad_terminate_all(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct virt_dma_desc *vd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct pxad_phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) "%s(): vchan %p: terminate all\n", __func__, &chan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) spin_lock_irqsave(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) vchan_get_all_descriptors(&chan->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) list_for_each_entry(vd, &head, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) "%s(): cancelling txd %p[%x] (completed=%d)\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) vd, vd->tx.cookie, is_desc_completed(vd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) phy = chan->phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) if (phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) phy_disable(chan->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) pxad_free_phy(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) chan->phy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) spin_lock(&pdev->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) phy->vchan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) spin_unlock(&pdev->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) spin_unlock_irqrestore(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) vchan_dma_desc_free_list(&chan->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
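/*
 * Compute the number of bytes left for @cookie by reading the current
 * source or target address from the phy and summing the lengths of the
 * hardware descriptors not yet reached. Returns 0 when the channel no
 * longer owns a phy or the descriptor has completed, and the full
 * length when the current address matches none of the descriptors.
 */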
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static unsigned int pxad_residue(struct pxad_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) dma_cookie_t cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct virt_dma_desc *vd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct pxad_desc_sw *sw_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct pxad_desc_hw *hw_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) u32 curr, start, len, end, residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) bool passed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * If the channel does not have a phy pointer anymore, it has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * been completed. Therefore, its residue is 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (!chan->phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) spin_lock_irqsave(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) vd = vchan_find_desc(&chan->vc, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (!vd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) sw_desc = to_pxad_sw_desc(vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) curr = phy_readl_relaxed(chan->phy, DSADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) curr = phy_readl_relaxed(chan->phy, DTADR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * curr must actually be read before checking descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * completion, so that a curr value inside the status updater
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * descriptor guarantees the test below returns true; the rmb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * keeps the curr load from being reordered past that test.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (is_desc_completed(vd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) for (i = 0; i < sw_desc->nb_desc - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) hw_desc = sw_desc->hw_desc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) start = hw_desc->dsadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) start = hw_desc->dtadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) len = hw_desc->dcmd & PXA_DCMD_LENGTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) end = start + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * 'passed' is latched once we find the descriptor that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * contains the curr pointer. All descriptors that occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * in the list _after_ that partially handled descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * are still to be processed and are hence added to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * residual bytes counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (passed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) residue += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) } else if (curr >= start && curr <= end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) residue += end - curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) passed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!passed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) residue = sw_desc->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) spin_unlock_irqrestore(&chan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) dev_dbg(&chan->vc.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) "%s(): txd %p[%x] sw_desc=%p: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) __func__, vd, cookie, sw_desc, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
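/*
 * Report the state of @cookie: a cookie recorded as hit by a bus error
 * is reported as DMA_ERROR, otherwise the generic cookie state is
 * returned and, when a state structure is provided, the residue is
 * filled in from pxad_residue().
 */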
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static enum dma_status pxad_tx_status(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (cookie == chan->bus_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) return DMA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ret = dma_cookie_status(dchan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (likely(txstate && (ret != DMA_ERROR)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) dma_set_residue(txstate, pxad_residue(chan, cookie));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
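/*
 * Wait for the channel to actually stop running, then let the virt-dma
 * core flush its tasklet and pending descriptor callbacks.
 */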
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static void pxad_synchronize(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) struct pxad_chan *chan = to_pxad_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) wait_event(chan->wq_state, !is_chan_running(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) vchan_synchronize(&chan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static void pxad_free_channels(struct dma_device *dmadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct pxad_chan *c, *cn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) list_for_each_entry_safe(c, cn, &dmadev->channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) vc.chan.device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) list_del(&c->vc.chan.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) tasklet_kill(&c->vc.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static int pxad_remove(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct pxad_device *pdev = platform_get_drvdata(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) pxad_cleanup_debugfs(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) pxad_free_channels(&pdev->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
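/*
 * Allocate the physical channel descriptors and request interrupts:
 * either one IRQ per physical channel when the platform provides
 * several, or a single shared interrupt handled by pxad_int_handler()
 * for the whole controller.
 */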
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int pxad_init_phys(struct platform_device *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct pxad_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) unsigned int nb_phy_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) int irq0, irq, nr_irq = 0, i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct pxad_phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) irq0 = platform_get_irq(op, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (irq0 < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return irq0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) sizeof(pdev->phys[0]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (!pdev->phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) for (i = 0; i < nb_phy_chans; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (platform_get_irq(op, i) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) nr_irq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) for (i = 0; i < nb_phy_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) phy = &pdev->phys[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) phy->base = pdev->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) phy->idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) irq = platform_get_irq(op, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if ((nr_irq > 1) && (irq > 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) ret = devm_request_irq(&op->dev, irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) pxad_chan_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) IRQF_SHARED, "pxa-dma", phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if ((nr_irq == 1) && (i == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) ret = devm_request_irq(&op->dev, irq0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) pxad_int_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) IRQF_SHARED, "pxa-dma", pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dev_err(pdev->slave.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) "%s(): can't request irq %d:%d\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static const struct of_device_id pxad_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) { .compatible = "marvell,pdma-1.0", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) MODULE_DEVICE_TABLE(of, pxad_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
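/*
 * Device tree translation callback: the two cells of the DMA specifier
 * are interpreted as the DRCMR requestor line and the channel priority,
 * and any free channel returned by dma_get_any_slave_channel() is used
 * to service them.
 */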
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct pxad_device *d = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) chan = dma_get_any_slave_channel(&d->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) to_pxad_chan(chan)->drcmr = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) to_pxad_chan(chan)->prio = dma_spec->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
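/*
 * Set up the dma_device: install the dmaengine callbacks, apply the
 * DMA mask, initialize the physical channels and create one virtual
 * channel per physical channel before registering with the dmaengine
 * core.
 */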
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) static int pxad_init_dmadev(struct platform_device *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct pxad_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) unsigned int nr_phy_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) unsigned int nr_requestors)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct pxad_chan *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) pdev->nr_chans = nr_phy_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) pdev->nr_requestors = nr_requestors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) INIT_LIST_HEAD(&pdev->slave.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) pdev->slave.device_tx_status = pxad_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) pdev->slave.device_issue_pending = pxad_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) pdev->slave.device_config = pxad_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) pdev->slave.device_synchronize = pxad_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) pdev->slave.device_terminate_all = pxad_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (op->dev.coherent_dma_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) dma_set_mask(&op->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) ret = pxad_init_phys(op, pdev, nr_phy_chans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) for (i = 0; i < nr_phy_chans; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) c->drcmr = U32_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) c->prio = PXAD_PRIO_LOWEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) c->vc.desc_free = pxad_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) vchan_init(&c->vc, &pdev->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) init_waitqueue_head(&c->wq_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return dmaenginem_async_device_register(&pdev->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
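/*
 * Probe the controller: map its registers, take the channel and
 * requestor counts from the device tree or from platform data
 * (defaulting to 32 channels), advertise the supported capabilities
 * and register the dmaengine device, plus the OF DMA controller on
 * device tree platforms.
 */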
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) static int pxad_probe(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) struct pxad_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) const struct of_device_id *of_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) const struct dma_slave_map *slave_map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct resource *iores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) const enum dma_slave_buswidth widths =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (!pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) spin_lock_init(&pdev->phy_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) iores = platform_get_resource(op, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) pdev->base = devm_ioremap_resource(&op->dev, iores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (IS_ERR(pdev->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return PTR_ERR(pdev->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) of_id = of_match_device(pxad_dt_ids, &op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (of_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) of_property_read_u32(op->dev.of_node, "#dma-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) &dma_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) &nb_requestors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) dev_warn(&op->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) "#dma-requests missing in OF, using default 32: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) nb_requestors = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) } else if (pdata && pdata->dma_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) dma_channels = pdata->dma_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) nb_requestors = pdata->nb_requestors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) slave_map = pdata->slave_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) slave_map_cnt = pdata->slave_map_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) dma_channels = 32; /* default to 32 channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) pdev->slave.filter.map = slave_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) pdev->slave.filter.mapcnt = slave_map_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) pdev->slave.filter.fn = pxad_filter_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) pdev->slave.copy_align = PDMA_ALIGNMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) pdev->slave.src_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) pdev->slave.dst_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) pdev->slave.descriptor_reuse = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) pdev->slave.dev = &op->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) dev_err(pdev->slave.dev, "unable to register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (op->dev.of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) /* Device-tree DMA controller registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) ret = of_dma_controller_register(op->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) pxad_dma_xlate, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dev_err(pdev->slave.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) "of_dma_controller_register failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) platform_set_drvdata(op, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) pxad_init_debugfs(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) dma_channels, nb_requestors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static const struct platform_device_id pxad_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) { "pxa-dma", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) { },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) static struct platform_driver pxad_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) .name = "pxa-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) .of_match_table = pxad_dt_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) .id_table = pxad_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) .probe = pxad_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) .remove = pxad_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
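/*
 * Filter callback used through the platform slave map (and by legacy,
 * non device tree dma_request_channel() users): accept only channels
 * owned by this driver and record the DRCMR requestor and priority
 * passed in struct pxad_param.
 */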
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) static bool pxad_filter_fn(struct dma_chan *chan, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct pxad_chan *c = to_pxad_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct pxad_param *p = param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (chan->device->dev->driver != &pxad_driver.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) c->drcmr = p->drcmr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) c->prio = p->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) module_platform_driver(pxad_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) MODULE_LICENSE("GPL v2");