// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)

static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * We split one 64-bit write into two 32-bit writes, as some HW
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}

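/*
 * DMAC_CHEN pairs each per-channel enable bit with a write-enable bit:
 * an enable bit is only updated when the matching write-enable bit is
 * set in the same register write. This lets the helpers below touch a
 * single channel without racing against writes for other channels.
 */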
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
	val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
	       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
}

static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}
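
/*
 * Illustrative example (made-up values): for src == 0x1008, dst == 0x2010,
 * len == 0x100 and m_data_width == 4, the OR of all terms is 0x3118, whose
 * lowest set bit is bit 3, so the returned width is 3, i.e. 8-byte (1 << 3)
 * transfers: the largest power-of-two unit that divides both addresses and
 * the length, capped at BIT(max_width).
 */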

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
{
	struct dw_axi_dma *dw = chan->chip->dw;
	struct axi_dma_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!desc)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	INIT_LIST_HEAD(&desc->xfer_list);
	desc->vd.tx.phys = phys;
	desc->chan = chan;

	return desc;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	struct dw_axi_dma *dw = chan->chip->dw;
	struct axi_dma_desc *child, *_next;
	unsigned int descs_put = 0;

	list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
		list_del(&child->xfer_list);
		dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
		descs_put++;
	}

	dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
	descs_put++;

	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		axi_chan_name(chan), descs_put,
		atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);

	if (chan->is_paused && ret == DMA_IN_PROGRESS)
		ret = DMA_PAUSED;

	return ret;
}

static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
{
	desc->lli.llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	u32 reg, irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

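	/*
	 * Program both halves of CHx_CFG: linked-list (LLI based)
	 * multi-block transfers on source and destination, memory-to-memory
	 * transfers with the DMAC as flow controller, the channel's
	 * priority, and the handshaking interface selects.
	 */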
	reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
	       DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	axi_chan_iowrite32(chan, CH_CFG_L, reg);

	reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
	       priority << CH_CFG_H_PRIORITY_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
	axi_chan_iowrite32(chan, CH_CFG_H, reg);

	write_chan_llp(chan, first->vd.tx.phys | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptors still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}

/*
 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
 * LLI as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of the
 * current block transfer.
 */
static void set_desc_last(struct axi_dma_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli.ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli.ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
{
	desc->lli.sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
{
	desc->lli.dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli.ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli.ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for destination master if available */
	val = le32_to_cpu(desc->lli.ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	desc->lli.ctl_lo = cpu_to_le32(val);
}

static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	u32 xfer_width, reg;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	while (len) {
		xfer_len = len;

		/*
		 * Take care of alignment. Source and destination widths can
		 * actually differ, but we keep them the same for simplicity.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data units of size
		 * (1 << xfer_width) to be transferred in a DMA block
		 * transfer. The BLOCK_TS register should be set to
		 * block_ts - 1.
		 */
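		/*
		 * Illustrative example (made-up numbers): with
		 * xfer_width == 2 (4-byte units) and xfer_len == 64 bytes,
		 * block_ts = 64 >> 2 = 16, so BLOCK_TS is programmed
		 * with 15.
		 */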
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		desc = axi_desc_get(chan);
		if (unlikely(!desc))
			goto err_desc_get;

		write_desc_sar(desc, src_adr);
		write_desc_dar(desc, dst_adr);
		desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		desc->lli.ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		desc->lli.ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(desc);
		set_desc_dest_master(desc);

		/* Manage transfer list (xfer_list) */
		if (!first) {
			first = desc;
		} else {
			list_add_tail(&desc->xfer_list, &first->xfer_list);
			write_desc_llp(prev, desc->vd.tx.phys | lms);
		}
		prev = desc;

		/* Update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
	}

	/* Total len of src/dest sg == 0, so no descriptors were allocated */
	if (unlikely(!first))
		return NULL;

	/* Set end-of-link in the last link descriptor of the list */
	set_desc_last(desc);

	return vchan_tx_prep(&chan->vc, &first->vd, flags);

err_desc_get:
	if (first)
		axi_desc_put(first);
	return NULL;
}
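
/*
 * Illustrative (not part of this driver) dmaengine client usage of the
 * memcpy path above; local names and the callback are made up:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(dchan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done_callback;	// hypothetical callback
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(dchan);	// ends up in dma_chan_issue_pending()
 */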

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_desc *desc)
{
	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli.sar),
		le64_to_cpu(desc->lli.dar),
		le64_to_cpu(desc->lli.llp),
		le32_to_cpu(desc->lli.block_ts_lo),
		le32_to_cpu(desc->lli.ctl_hi),
		le32_to_cpu(desc->lli.ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	struct axi_dma_desc *desc;

	axi_chan_dump_lli(chan, desc_head);
	list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
		axi_chan_dump_lli(chan, desc);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor is currently at the head of the vc list */
	vd = vchan_next_desc(&chan->vc);
	/* Remove the erroneous descriptor from the issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor is currently at the head of the vc list */
	vd = vchan_next_desc(&chan->vc);
	/* Remove the completed descriptor from the issued list before completing */
	list_del(&vd->node);
	vchan_cookie_complete(vd);

	/* Submit queued descriptors after processing the completed ones */
	axi_chan_start_first_queued(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	vchan_get_all_descriptors(&chan->vc, &head);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}

static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations: up to ~40 us (20 x 2 us) */
	u32 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
	       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u32 val;

	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
	val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
	val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);

	chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}

static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp - 1;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
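/*
 * An illustrative (hypothetical) device-tree node providing the properties
 * parsed above; all values are examples only:
 *
 *	dmac: dma-controller@80000 {
 *		compatible = "snps,axi-dma-1.01a";
 *		reg = <0x80000 0x400>;
 *		clocks = <&core_clk>, <&cfgr_clk>;
 *		clock-names = "core-clk", "cfgr-clk";
 *		interrupts = <27>;
 *		dma-channels = <4>;
 *		snps,dma-masters = <2>;
 *		snps,data-width = <3>;
 *		snps,block-size = <4096 4096 4096 4096>;
 *		snps,priority = <0 1 2 3>;
 *		snps,axi-max-burst-len = <16>;
 *	};
 */
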
static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct resource *mem;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(chip->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME, chip);
	if (ret)
		return ret;

	/* LLI addresses must be aligned to a 64-byte boundary */
	dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
					 sizeof(struct axi_dma_desc), 64, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptor DMA pool\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
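		/* Per-channel register banks follow the common block at a fixed stride */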
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.chancnt = hdata->nr_channels;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
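	/* This version of the driver supports memory-to-memory transfers only */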
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;

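	/*
	 * A minimal client-side sketch (illustrative only, not part of this
	 * driver) of issuing a memcpy through the generic dmaengine API once
	 * this device is registered; "src_dma", "dst_dma" and "len" are
	 * assumed to be pre-mapped DMA addresses and a transfer length:
	 *
	 *	dma_cap_mask_t mask;
	 *	struct dma_chan *chan;
	 *	struct dma_async_tx_descriptor *desc;
	 *
	 *	dma_cap_zero(mask);
	 *	dma_cap_set(DMA_MEMCPY, mask);
	 *	chan = dma_request_chan_by_mask(&mask);
	 *	if (IS_ERR(chan))
	 *		return PTR_ERR(chan);
	 *
	 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
	 *					 DMA_PREP_INTERRUPT);
	 *	if (desc) {
	 *		dmaengine_submit(desc);
	 *		dma_async_issue_pending(chan);
	 *	}
	 */
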
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get() here instead of
	 * pm_runtime_get_noresume() + axi_dma_resume() because we need the
	 * driver to work even without Runtime PM support.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
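	/* devm-/dmam-managed resources unwind automatically; only undo runtime PM */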
	pm_runtime_disable(chip->dev);

	return ret;
}

static int dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable the clocks before accessing the registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	devm_free_irq(chip->dev, chip->irq, chip);

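	/* Kill the vchan tasklets and unlink the channels before the device goes away */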
	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}

	return 0;
}

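/* Only Runtime PM callbacks are wired up; system sleep is not handled here */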
static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

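/* OF match table; MODULE_DEVICE_TABLE() below also enables module autoloading */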
static const struct of_device_id dw_dma_of_id_table[] = {
	{ .compatible = "snps,axi-dma-1.01a" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = of_match_ptr(dw_dma_of_id_table),
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");