// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include "fsl-edma-common.h"

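/* Vybrid/mpc577x 32-channel ("v1") eDMA register offsets */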
#define EDMA_CR			0x00
#define EDMA_ES			0x04
#define EDMA_ERQ		0x0C
#define EDMA_EEI		0x14
#define EDMA_SERQ		0x1B
#define EDMA_CERQ		0x1A
#define EDMA_SEEI		0x19
#define EDMA_CEEI		0x18
#define EDMA_CINT		0x1F
#define EDMA_CERR		0x1E
#define EDMA_SSRT		0x1D
#define EDMA_CDNE		0x1C
#define EDMA_INTR		0x24
#define EDMA_ERR		0x2C

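/* ColdFire mcf5441x 64-channel ("v2") eDMA register offsets */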
#define EDMA64_ERQH		0x08
#define EDMA64_EEIH		0x10
#define EDMA64_SERQ		0x18
#define EDMA64_CERQ		0x19
#define EDMA64_SEEI		0x1a
#define EDMA64_CEEI		0x1b
#define EDMA64_CINT		0x1c
#define EDMA64_CERR		0x1d
#define EDMA64_SSRT		0x1e
#define EDMA64_CDNE		0x1f
#define EDMA64_INTH		0x20
#define EDMA64_INTL		0x24
#define EDMA64_ERRH		0x28
#define EDMA64_ERRL		0x2c

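/* Offset of the channel TCD array within the eDMA register block */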
#define EDMA_TCD		0x1000

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
		edma_writeb(fsl_chan->edma, ch, regs->serq);
	} else {
		/*
		 * ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals.
		 */
		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
		iowrite8(ch, regs->serq);
	}
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	if (fsl_chan->edma->drvdata->version == v1) {
		edma_writeb(fsl_chan->edma, ch, regs->cerq);
		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
	} else {
		/*
		 * ColdFire is big endian, and accesses natively
		 * big endian I/O peripherals.
		 */
		iowrite8(ch, regs->cerq);
		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			   u32 off, u32 slot, bool enable)
{
	u8 val8;

	if (enable)
		val8 = EDMAMUX_CHCFG_ENBL | slot;
	else
		val8 = EDMAMUX_CHCFG_DIS;

	iowrite8(val8, addr + off);
}

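/* v3 controllers use one 32-bit mux register per channel, hence "off * 4" */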
static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
			    u32 off, u32 slot, bool enable)
{
	u32 val;

	if (enable)
		val = EDMAMUX_CHCFG_ENBL << 24 | slot;
	else
		val = EDMAMUX_CHCFG_DIS;

	iowrite32(val, addr + off * 4);
}

void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable)
{
	u32 ch = fsl_chan->vchan.chan.chan_id;
	void __iomem *muxaddr;
	unsigned int chans_per_mux, ch_off;
	int endian_diff[4] = {3, 1, -1, -3};
	u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

	chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

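	/*
	 * On mux_swap controllers the 8-bit mux config registers are laid
	 * out byte-swapped within each 32-bit word, so mirror the byte
	 * offset inside its group of four.
	 */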
	if (fsl_chan->edma->drvdata->mux_swap)
		ch_off += endian_diff[ch_off % 4];

	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
	slot = EDMAMUX_CHCFG_SOURCE(slot);

	if (fsl_chan->edma->drvdata->version == v3)
		mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
	else
		mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);

static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case 1:
		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
	case 2:
		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
	case 4:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	case 8:
		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
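	/* Fall back to 32-bit transfer size for unexpected bus widths */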
	default:
		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
	}
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = to_fsl_edma_desc(vdesc);
	for (i = 0; i < fsl_desc->n_tcds; i++)
		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
}
EXPORT_SYMBOL_GPL(fsl_edma_free_desc);

int fsl_edma_terminate_all(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_chan->edesc = NULL;
	fsl_chan->idle = true;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);

int fsl_edma_pause(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_disable_request(fsl_chan);
		fsl_chan->status = DMA_PAUSED;
		fsl_chan->idle = true;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_pause);

int fsl_edma_resume(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	if (fsl_chan->edesc) {
		fsl_edma_enable_request(fsl_chan);
		fsl_chan->status = DMA_IN_PROGRESS;
		fsl_chan->idle = false;
	}
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_resume);

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
	if (fsl_chan->dma_dir != DMA_NONE)
		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
				   fsl_chan->dma_dev_addr,
				   fsl_chan->dma_dev_size,
				   fsl_chan->dma_dir, 0);
	fsl_chan->dma_dir = DMA_NONE;
}

static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
				    enum dma_transfer_direction dir)
{
	struct device *dev = fsl_chan->vchan.chan.device->dev;
	enum dma_data_direction dma_dir;
	phys_addr_t addr = 0;
	u32 size = 0;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		dma_dir = DMA_FROM_DEVICE;
		addr = fsl_chan->cfg.dst_addr;
		size = fsl_chan->cfg.dst_maxburst;
		break;
	case DMA_DEV_TO_MEM:
		dma_dir = DMA_TO_DEVICE;
		addr = fsl_chan->cfg.src_addr;
		size = fsl_chan->cfg.src_maxburst;
		break;
	default:
		dma_dir = DMA_NONE;
		break;
	}

	/* Already mapped for this config? */
	if (fsl_chan->dma_dir == dma_dir)
		return true;

	fsl_edma_unprep_slave_dma(fsl_chan);

	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
		return false;
	fsl_chan->dma_dev_size = size;
	fsl_chan->dma_dir = dma_dir;

	return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
	fsl_edma_unprep_slave_dma(fsl_chan);

	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_slave_config);

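/*
 * The residue is the total number of bytes described by the TCD chain,
 * minus the bytes of every fully completed TCD, minus the progress the
 * hardware has already made (current source/destination address) within
 * the in-flight TCD.
 */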
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
				    struct virt_dma_desc *vdesc, bool in_progress)
{
	struct fsl_edma_desc *edesc = fsl_chan->edesc;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;
	enum dma_transfer_direction dir = edesc->dirn;
	dma_addr_t cur_addr, dma_addr;
	size_t len, size;
	int i;

	/* calculate the total size in this desc */
	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);

	if (!in_progress)
		return len;

	if (dir == DMA_MEM_TO_DEV)
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
	else
		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);

	/* figure out which TCDs have finished and calculate the residue */
	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
		if (dir == DMA_MEM_TO_DEV)
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
		else
			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

		len -= size;
		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
			len += dma_addr + size - cur_addr;
			break;
		}
	}

	return len;
}

enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, txstate);
	if (status == DMA_COMPLETE)
		return status;

	if (!txstate)
		return fsl_chan->status;

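	/*
	 * For the in-flight descriptor, account for the hardware's current
	 * progress; a descriptor still sitting in the queue has its full
	 * length as residue.
	 */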
	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, true);
	else if (vdesc)
		txstate->residue =
			fsl_edma_desc_residue(fsl_chan, vdesc, false);
	else
		txstate->residue = 0;

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	return fsl_chan->status;
}
EXPORT_SYMBOL_GPL(fsl_edma_tx_status);

static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
				  struct fsl_edma_hw_tcd *tcd)
{
	struct fsl_edma_engine *edma = fsl_chan->edma;
	struct edma_regs *regs = &fsl_chan->edma->regs;
	u32 ch = fsl_chan->vchan.chan.chan_id;

	/*
	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
	 * endian format. However, we need to load the TCD registers in
	 * big- or little-endian format, matching the eDMA engine's register
	 * endianness; the endian-aware edma_write* helpers perform the
	 * required byte swapping.
	 */
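	/* Clear CSR first, so no stale enable/link bits are set mid-update */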
	edma_writew(edma, 0, &regs->tcd[ch].csr);

	edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
	edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);

	edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
	edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);

	edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
	edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);

	edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
	edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
	edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);

	edma_writel(edma, (s32)tcd->dlast_sga,
		    &regs->tcd[ch].dlast_sga);

	edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}

static inline
void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
		       bool disable_req, bool enable_sg)
{
	u16 csr = 0;

	/*
	 * eDMA hardware SGs require the TCDs to be stored in little
	 * endian format irrespective of the register endian model.
	 * So we put the value in little endian in memory and let
	 * fsl_edma_set_tcd_regs() do the byte swap when loading the
	 * registers.
	 */
	tcd->saddr = cpu_to_le32(src);
	tcd->daddr = cpu_to_le32(dst);

	tcd->attr = cpu_to_le16(attr);

	tcd->soff = cpu_to_le16(soff);

	tcd->nbytes = cpu_to_le32(nbytes);
	tcd->slast = cpu_to_le32(slast);

	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
	tcd->doff = cpu_to_le16(doff);

	tcd->dlast_sga = cpu_to_le32(dlast_sga);

	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
	if (major_int)
		csr |= EDMA_TCD_CSR_INT_MAJOR;

	if (disable_req)
		csr |= EDMA_TCD_CSR_D_REQ;

	if (enable_sg)
		csr |= EDMA_TCD_CSR_E_SG;

	tcd->csr = cpu_to_le16(csr);
}

static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
						 int sg_len)
{
	struct fsl_edma_desc *fsl_desc;
	int i;

	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
	if (!fsl_desc)
		return NULL;

	fsl_desc->echan = fsl_chan;
	fsl_desc->n_tcds = sg_len;
	for (i = 0; i < sg_len; i++) {
		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
		if (!fsl_desc->tcd[i].vtcd)
			goto err;
	}
	return fsl_desc;

err:
	while (--i >= 0)
		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
			      fsl_desc->tcd[i].ptcd);
	kfree(fsl_desc);
	return NULL;
}

struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	dma_addr_t dma_buf_next;
	int sg_len, i;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	sg_len = buf_len / period_len;
	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = true;
	fsl_desc->dirn = direction;

	dma_buf_next = dma_addr;
	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	iter = period_len / nbytes;

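	/*
	 * Build a circular TCD chain: each TCD's scatter/gather link points
	 * to the next TCD and the last one wraps back to the first, so the
	 * transfer keeps cycling over the buffer until it is terminated.
	 */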
	for (i = 0; i < sg_len; i++) {
		if (dma_buf_next >= dma_addr + buf_len)
			dma_buf_next = dma_addr;

		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = dma_buf_next;
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = dma_buf_next;
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
				  fsl_chan->attr, soff, nbytes, 0, iter,
				  iter, doff, last_sg, true, false, true);
		dma_buf_next += period_len;
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);

struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	struct fsl_edma_desc *fsl_desc;
	struct scatterlist *sg;
	u32 src_addr, dst_addr, last_sg, nbytes;
	u16 soff, doff, iter;
	int i;

	if (!is_slave_direction(direction))
		return NULL;

	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
		return NULL;

	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
	if (!fsl_desc)
		return NULL;
	fsl_desc->iscyclic = false;
	fsl_desc->dirn = direction;

	if (direction == DMA_MEM_TO_DEV) {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
		nbytes = fsl_chan->cfg.dst_addr_width *
			fsl_chan->cfg.dst_maxburst;
	} else {
		fsl_chan->attr =
			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
		nbytes = fsl_chan->cfg.src_addr_width *
			fsl_chan->cfg.src_maxburst;
	}

	for_each_sg(sgl, sg, sg_len, i) {
		/* get next sg's physical address */
		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = fsl_chan->dma_dev_addr;
			soff = fsl_chan->cfg.dst_addr_width;
			doff = 0;
		} else {
			src_addr = fsl_chan->dma_dev_addr;
			dst_addr = sg_dma_address(sg);
			soff = 0;
			doff = fsl_chan->cfg.src_addr_width;
		}

		iter = sg_dma_len(sg) / nbytes;
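		/*
		 * Intermediate TCDs link to the next TCD via scatter/gather.
		 * The last TCD ends the chain instead: it raises the
		 * major-loop interrupt and clears the hardware request bit
		 * on completion.
		 */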
		if (i < sg_len - 1) {
			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  false, false, true);
		} else {
			last_sg = 0;
			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
					  dst_addr, fsl_chan->attr, soff,
					  nbytes, 0, iter, iter, doff, last_sg,
					  true, true, false);
		}
	}

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);

void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
	struct virt_dma_desc *vdesc;

	lockdep_assert_held(&fsl_chan->vchan.lock);

	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
	fsl_edma_enable_request(fsl_chan);
	fsl_chan->status = DMA_IN_PROGRESS;
	fsl_chan->idle = false;
}
EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);

void fsl_edma_issue_pending(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

	if (unlikely(fsl_chan->pm_state != RUNNING)) {
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
		/* cannot submit due to suspend */
		return;
	}

	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
		fsl_edma_xfer_desc(fsl_chan);

	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);

int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

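	/* Hardware TCDs must be 32-byte aligned, hence the pool alignment */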
	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
					     sizeof(struct fsl_edma_hw_tcd),
					     32, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);

void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	fsl_edma_disable_request(fsl_chan);
	fsl_edma_chan_mux(fsl_chan, 0, false);
	fsl_chan->edesc = NULL;
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	fsl_edma_unprep_slave_dma(fsl_chan);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	dma_pool_destroy(fsl_chan->tcd_pool);
	fsl_chan->tcd_pool = NULL;
}
EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_edma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);

/*
 * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
 * register offsets differ from those of the 64-channel ColdFire
 * mcf5441x eDMA (here called "v2").
 *
 * This function sets up the register offsets as per the declared
 * version, so it must be called in xxx_edma_probe() just after setting
 * the edma "version" and "membase" appropriately.
 */
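/*
 * A minimal caller sketch (hypothetical probe code; only "drvdata",
 * "membase" and this helper are taken from this file):
 *
 *	fsl_edma->drvdata = of_device_get_match_data(&pdev->dev);
 *	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
 *	fsl_edma_setup_regs(fsl_edma);
 */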
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
	edma->regs.cr = edma->membase + EDMA_CR;
	edma->regs.es = edma->membase + EDMA_ES;
	edma->regs.erql = edma->membase + EDMA_ERQ;
	edma->regs.eeil = edma->membase + EDMA_EEI;

	edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SERQ : EDMA_SERQ);
	edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERQ : EDMA_CERQ);
	edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SEEI : EDMA_SEEI);
	edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CEEI : EDMA_CEEI);
	edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CINT : EDMA_CINT);
	edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CERR : EDMA_CERR);
	edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_SSRT : EDMA_SSRT);
	edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_CDNE : EDMA_CDNE);
	edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_INTL : EDMA_INTR);
	edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
			EDMA64_ERRL : EDMA_ERR);

	if (edma->drvdata->version == v2) {
		edma->regs.erqh = edma->membase + EDMA64_ERQH;
		edma->regs.eeih = edma->membase + EDMA64_EEIH;
		edma->regs.errh = edma->membase + EDMA64_ERRH;
		edma->regs.inth = edma->membase + EDMA64_INTH;
	}

	edma->regs.tcd = edma->membase + EDMA_TCD;
}
EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);

MODULE_LICENSE("GPL v2");