// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");
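
/*
 * Usage sketch (assuming the driver is built as the usual
 * "tsi721_mport" module; adjust the module name to your build):
 *
 *   modprobe tsi721_mport dma_desc_per_channel=256 dma_sel=0x0f
 *
 * would restrict the driver to BDMA channels 0-3 and double the size
 * of each channel's hardware descriptor ring. All three parameters
 * are read-only at runtime (S_IRUGO).
 */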

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

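/*
 * tsi721_bdma_ch_init() builds the per-channel hardware structures: a
 * ring of bd_num buffer descriptors plus one trailing DTYPE3 "link"
 * descriptor that points back to the ring base, and a descriptor
 * status FIFO that the hardware fills as descriptors complete. Both
 * areas are allocated from coherent DMA memory since the device
 * accesses them directly.
 */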
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_alloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_alloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		  bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

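	/* Read back to flush the posted write before proceeding */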
	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (!bdma_chan->bd_base)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

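/*
 * Note the ordering below: on enable, pending bits are cleared before
 * the interrupt mask is opened so that stale events cannot fire at
 * once; on disable, the mask is closed first so that no new events
 * are raised while the pending bits are being cleared.
 */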
static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}

}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

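/*
 * Starting a transfer is a doorbell-style write: the engine compares
 * its descriptor read count (DRDCNT) against the descriptor write
 * count (DWRCNT) and fetches descriptors until the two match, so
 * writing the updated wr_count_next hands every freshly filled
 * descriptor to the hardware in one shot.
 */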
/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

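/*
 * Build the invariant part of a Type-1 (DTYPE1) buffer descriptor.
 * The RapidIO address is packed as the code shows: its two lowest
 * bits land in the bcount word, the remainder is shifted right by two
 * across raddr_lo/raddr_hi, and the two uppermost bits come from
 * rio_addr_u. The byte count itself is filled in later by
 * tsi721_desc_fill_end() once scatterlist merging is resolved.
 */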
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (!bd_ptr)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
				(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (!bd_ptr)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

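/*
 * The descriptor status FIFO is consumed in blocks of eight 64-bit
 * entries; a non-zero entry marks a completed descriptor. Entries are
 * zeroed once seen, and the updated software read pointer is written
 * back to DSRP so the hardware can reuse those slots.
 */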
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

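/*
 * tsi721_submit_sg() translates the request's scatterlist into
 * hardware buffer descriptors. Physically contiguous SG entries are
 * merged into a single descriptor as long as the combined length fits
 * in TSI721_BDMA_MAX_BCOUNT. If the hardware ring fills up before the
 * scatterlist is exhausted, the remainder is parked in the tx
 * descriptor (desc->sg/sg_len) and submitted again from the tasklet
 * once completions free up ring slots.
 */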
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			  bdma_chan->id, i, desc->sg_len,
			  (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

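/*
 * Pick the next request to run. With no transfer in flight, the head
 * of the pending queue becomes the active request; either way the
 * (possibly partial) scatterlist is pushed to the hardware. Callers
 * hold the channel spinlock.
 */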
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}

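/*
 * Channel tasklet, scheduled from the interrupt handlers with channel
 * interrupts masked. It covers three cases: a channel error (the
 * engine is aborted and re-initialized, and the failed request is
 * completed with DMA_ERROR), a status-FIFO-full condition, and normal
 * completion (finished requests get their cookie completed and their
 * callback invoked; partially submitted ones are pushed again).
 * Channel interrupts are re-enabled on exit.
 */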
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;
		struct tsi721_tx_desc *desc;

		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */

		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
		} else {
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

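/*
 * dmaengine tx_submit hook: assigns the completion cookie and queues
 * the descriptor on the channel. Submission is rejected if the
 * descriptor is still linked on some list (wrong state) or if the
 * channel has already been shut down.
 */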
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);
	tsi721_advance_work(bdma_chan, NULL);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
		       GFP_ATOMIC);
	if (!desc) {
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}

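/*
 * Wait for any in-flight interrupt handlers for this channel to
 * finish. In MSI-X mode both per-channel vectors (DONE and INT) must
 * be synchronized; in INTx/MSI mode everything arrives on the single
 * PCI device IRQ.
 */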
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (priv->flags & TSI721_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) bdma_chan->id].vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) bdma_chan->id].vector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) synchronize_irq(priv->pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static void tsi721_free_chan_resources(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (!bdma_chan->bd_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) tsi721_bdma_interrupt_enable(bdma_chan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) bdma_chan->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) tsi721_sync_dma_irq(bdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) tasklet_kill(&bdma_chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) INIT_LIST_HEAD(&bdma_chan->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) kfree(bdma_chan->tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) tsi721_bdma_ch_free(bdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) spin_lock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) status = dma_cookie_status(dchan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) spin_unlock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
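/*
 * Editorial sketch: a client polls the status callback above through the
 * generic dmaengine wrapper; "example_tx_done" is a hypothetical name.
 */
static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	/* dmaengine_tx_status() dispatches to tsi721_tx_status() */
	return dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE;
}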
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static void tsi721_issue_pending(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) spin_lock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) tsi721_advance_work(bdma_chan, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) spin_unlock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) }
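/*
 * Note (editorial): issue_pending only kicks the engine when it is idle and
 * the channel is still active; if a transfer is already running, completion
 * handling advances the queue instead, so the call safely degenerates to a
 * no-op.
 */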
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct scatterlist *sgl, unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) enum dma_transfer_direction dir, unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) void *tinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) struct tsi721_tx_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) struct rio_dma_ext *rext = tinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) enum dma_rtype rtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) struct dma_async_tx_descriptor *txd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (!sgl || !sg_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) tsi_err(&dchan->dev->device, "DMAC%d No SG list",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) bdma_chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (dir == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) rtype = NREAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) } else if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) switch (rext->wr_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) case RDW_ALL_NWRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) rtype = ALL_NWRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) case RDW_ALL_NWRITE_R:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) rtype = ALL_NWRITE_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) case RDW_LAST_NWRITE_R:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) rtype = LAST_NWRITE_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) tsi_err(&dchan->dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) "DMAC%d Unsupported DMA direction option",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) bdma_chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) spin_lock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (!list_empty(&bdma_chan->free_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) desc = list_first_entry(&bdma_chan->free_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct tsi721_tx_desc, desc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) list_del_init(&desc->desc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) desc->destid = rext->destid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) desc->rio_addr = rext->rio_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) desc->rio_addr_u = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) desc->rtype = rtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) desc->sg_len = sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) desc->sg = sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) txd = &desc->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) txd->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) spin_unlock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (!txd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) tsi_debug(DMA, &dchan->dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) "DMAC%d no free TXD available", bdma_chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return ERR_PTR(-EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
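/*
 * Editorial sketch: RapidIO clients normally reach the prep callback above
 * through rio_dma_prep_xfer() from <linux/rio_drv.h>.  A minimal write
 * request might look like this; "example_rio_write" is hypothetical and
 * error handling is trimmed.
 */
static int example_rio_write(struct dma_chan *chan, u16 destid,
			     struct scatterlist *sgl, unsigned int sg_len,
			     u64 rio_addr)
{
	struct rio_dma_data data = {
		.sg = sgl,
		.sg_len = sg_len,
		.rio_addr = rio_addr,
		.rio_addr_u = 0,
		.wr_type = RDW_LAST_NWRITE_R,	/* NWRITE_R on last packet */
	};
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = rio_dma_prep_xfer(chan, destid, &data, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(txd))
		return txd ? PTR_ERR(txd) : -EBUSY;

	cookie = dmaengine_submit(txd);		/* tsi721_tx_submit() */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* tsi721_issue_pending() */
	return 0;
}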
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) static int tsi721_terminate_all(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct tsi721_tx_desc *desc, *_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) spin_lock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) bdma_chan->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /* Wait for the engine to finish the in-flight descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) while (!tsi721_dma_is_idle(bdma_chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) udelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (bdma_chan->active_tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) list_add(&bdma_chan->active_tx->desc_node, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) list_splice_init(&bdma_chan->queue, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) list_for_each_entry_safe(desc, _d, &list, desc_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) tsi721_dma_tx_err(bdma_chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) spin_unlock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
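/*
 * Note (editorial): clients invoke the handler above via
 * dmaengine_terminate_all().  It clears bdma_chan->active, so subsequent
 * tsi721_issue_pending() calls refuse to start new work until the channel
 * resources are allocated again.
 */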
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (!bdma_chan->active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) spin_lock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) if (!tsi721_dma_is_idle(bdma_chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) int timeout = 100000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* stop the transfer in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) iowrite32(TSI721_DMAC_CTL_SUSP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) bdma_chan->regs + TSI721_DMAC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* Wait until DMA channel stops */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) spin_unlock_bh(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) void tsi721_dma_stop_all(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) for (i = 0; i < TSI721_DMA_MAXCH; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) tsi721_dma_stop(&priv->bdma[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int tsi721_register_dma(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int nr_channels = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct rio_mport *mport = &priv->mport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) INIT_LIST_HEAD(&mport->dma.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) for (i = 0; i < TSI721_DMA_MAXCH; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) bdma_chan->dchan.device = &mport->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) bdma_chan->dchan.cookie = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) bdma_chan->dchan.chan_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) bdma_chan->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) bdma_chan->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) spin_lock_init(&bdma_chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) bdma_chan->active_tx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) INIT_LIST_HEAD(&bdma_chan->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) INIT_LIST_HEAD(&bdma_chan->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) (unsigned long)bdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) list_add_tail(&bdma_chan->dchan.device_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) &mport->dma.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) nr_channels++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) mport->dma.chancnt = nr_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) dma_cap_zero(mport->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) mport->dma.dev = &priv->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) mport->dma.device_tx_status = tsi721_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) mport->dma.device_issue_pending = tsi721_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) mport->dma.device_terminate_all = tsi721_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) err = dma_async_device_register(&mport->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) tsi_err(&priv->pdev->dev, "Failed to register DMA device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
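/*
 * Editorial sketch: the device is registered with DMA_PRIVATE | DMA_SLAVE
 * capabilities, so clients pair dma_request_channel() with a filter that
 * matches the desired mport; the names below are hypothetical (the RapidIO
 * core offers comparable helpers for this).
 */
static bool example_filter(struct dma_chan *chan, void *arg)
{
	struct rio_mport *mport = arg;

	/* accept only channels belonging to this mport's DMA device */
	return chan->device == &mport->dma;
}

static struct dma_chan *example_request(struct rio_mport *mport)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_filter, mport);
}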
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) void tsi721_unregister_dma(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct rio_mport *mport = &priv->mport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct dma_chan *chan, *_c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) struct tsi721_bdma_chan *bdma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) tsi721_dma_stop_all(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dma_async_device_unregister(&mport->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) list_for_each_entry_safe(chan, _c, &mport->dma.channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) bdma_chan = to_tsi721_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (bdma_chan->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) tsi721_bdma_interrupt_enable(bdma_chan, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) bdma_chan->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) tsi721_sync_dma_irq(bdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) tasklet_kill(&bdma_chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) INIT_LIST_HEAD(&bdma_chan->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) kfree(bdma_chan->tx_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tsi721_bdma_ch_free(bdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) list_del(&chan->device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }