^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* ebus.c: EBUS DMA library code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1999 David S. Miller (davem@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/ebus_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #define EBDMA_CSR 0x00UL /* Control/Status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #define EBDMA_ADDR 0x04UL /* DMA Address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #define EBDMA_COUNT 0x08UL /* DMA Count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #define EBDMA_CSR_INT_PEND 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define EBDMA_CSR_ERR_PEND 0x00000002
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #define EBDMA_CSR_DRAIN 0x00000004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #define EBDMA_CSR_INT_EN 0x00000010
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #define EBDMA_CSR_RESET 0x00000080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #define EBDMA_CSR_WRITE 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #define EBDMA_CSR_EN_DMA 0x00000200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define EBDMA_CSR_CYC_PEND 0x00000400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #define EBDMA_CSR_DIAG_RD_DONE 0x00000800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define EBDMA_CSR_DIAG_WR_DONE 0x00001000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #define EBDMA_CSR_EN_CNT 0x00002000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define EBDMA_CSR_TC 0x00004000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #define EBDMA_CSR_DIS_CSR_DRN 0x00010000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define EBDMA_CSR_BURST_SZ_1 0x00080000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define EBDMA_CSR_BURST_SZ_4 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define EBDMA_CSR_BURST_SZ_8 0x00040000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #define EBDMA_CSR_BURST_SZ_16 0x000c0000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define EBDMA_CSR_DIAG_EN 0x00100000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #define EBDMA_CSR_DIS_ERR_PEND 0x00400000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #define EBDMA_CSR_TCI_DIS 0x00800000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #define EBDMA_CSR_EN_NEXT 0x01000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #define EBDMA_CSR_DMA_ON 0x02000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #define EBDMA_CSR_A_LOADED 0x04000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define EBDMA_CSR_NA_LOADED 0x08000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #define EBDMA_CSR_DEV_ID_MASK 0xf0000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #define EBUS_DMA_RESET_TIMEOUT 10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) u32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) if (no_drain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) val = readl(p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) struct ebus_dma_info *p = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) u32 csr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) spin_lock_irqsave(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) csr = readl(p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) writel(csr, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) spin_unlock_irqrestore(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) if (csr & EBDMA_CSR_ERR_PEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) } else if (csr & EBDMA_CSR_INT_PEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) p->callback(p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) (csr & EBDMA_CSR_TC) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) p->client_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int ebus_dma_register(struct ebus_dma_info *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) u32 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (!p->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) EBUS_DMA_FLAG_TCI_DISABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) if (!strlen(p->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) __ebus_dma_reset(p, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) csr |= EBDMA_CSR_TCI_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) writel(csr, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) EXPORT_SYMBOL(ebus_dma_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) u32 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) spin_lock_irqsave(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) csr = readl(p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) csr |= EBDMA_CSR_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) writel(csr, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) spin_unlock_irqrestore(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) spin_lock_irqsave(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) csr = readl(p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) csr &= ~EBDMA_CSR_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) writel(csr, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) spin_unlock_irqrestore(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) free_irq(p->irq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) EXPORT_SYMBOL(ebus_dma_irq_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) void ebus_dma_unregister(struct ebus_dma_info *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) u32 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) int irq_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) spin_lock_irqsave(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) csr = readl(p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) if (csr & EBDMA_CSR_INT_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) csr &= ~EBDMA_CSR_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) writel(csr, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) irq_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) spin_unlock_irqrestore(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) if (irq_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) free_irq(p->irq, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) EXPORT_SYMBOL(ebus_dma_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) u32 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (len >= (1 << 24))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) spin_lock_irqsave(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) csr = readl(p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) if (!(csr & EBDMA_CSR_EN_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) err = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) if (csr & EBDMA_CSR_NA_LOADED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) writel(len, p->regs + EBDMA_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) writel(bus_addr, p->regs + EBDMA_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) spin_unlock_irqrestore(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) EXPORT_SYMBOL(ebus_dma_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) void ebus_dma_prepare(struct ebus_dma_info *p, int write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) u32 csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) spin_lock_irqsave(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) __ebus_dma_reset(p, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) csr = (EBDMA_CSR_INT_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) EBDMA_CSR_EN_CNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) EBDMA_CSR_BURST_SZ_16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) EBDMA_CSR_EN_NEXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) csr |= EBDMA_CSR_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) csr |= EBDMA_CSR_TCI_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) writel(csr, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) spin_unlock_irqrestore(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) EXPORT_SYMBOL(ebus_dma_prepare);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
/* Return the live value of the DMA count register, i.e. the number of
 * bytes remaining in the current transfer.
 */
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
/* Return the live value of the DMA address register.  NOTE(review):
 * presumably the bus address the engine is currently working at —
 * confirm against hardware documentation before relying on exactness.
 */
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
	return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) void ebus_dma_enable(struct ebus_dma_info *p, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) u32 orig_csr, csr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) spin_lock_irqsave(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) orig_csr = csr = readl(p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) csr |= EBDMA_CSR_EN_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) csr &= ~EBDMA_CSR_EN_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if ((orig_csr & EBDMA_CSR_EN_DMA) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) (csr & EBDMA_CSR_EN_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) writel(csr, p->regs + EBDMA_CSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) spin_unlock_irqrestore(&p->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) EXPORT_SYMBOL(ebus_dma_enable);