// SPDX-License-Identifier: GPL-2.0-only
/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 * Derived in part from arch/arm/mach-sa1100/dma.c,
 * Copyright (C) 2000, 2001 by Nicolas Pitre
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000
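
/*
 * DMA_MAX_SIZE presumably matches the width of the hardware transfer
 * count fields (DBTA/DBTB): one buffer can carry at most 0x1fff bytes.
 * DMA_ALIGN is used as a mask throughout, so buffer addresses must be
 * word (4-byte) aligned and split lengths are rounded down to a
 * multiple of 4.
 */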

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)
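
/*
 * Each physical channel is double-buffered: STRTA/STRTB arm buffer A/B,
 * DONEA/DONEB signal their completion, and BIU tells the driver which
 * buffer the engine is working on, so one buffer can be reloaded while
 * the other is transferring.
 */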

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)
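
/*
 * Bits 7:4 of DDAR select the peripheral served by the channel; the
 * device (FIFO) address is folded into the upper bits by
 * sa11x0_dma_device_config() below.
 */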

struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};
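
/*
 * Lock ordering: a channel's vc.lock is taken first, with the
 * device-wide d->lock nested inside it where both are needed (see
 * sa11x0_dma_issue_pending() and friends).  p->vchan and c->phy only
 * change while the channel is idle, which is why the IRQ handler may
 * check c->phy == p under vc.lock alone.
 */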

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

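/*
 * Load the next scatterlist entry into whichever hardware buffer (A or
 * B) is free.  Called with the channel's vc.lock held, both from the
 * IRQ path and when a transfer is first started.
 */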
static noinline void sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/*
	 * Select buffer to load according to channel status: buffer A is
	 * the one to (re)load when the engine is busy with an armed
	 * buffer B, or when A is neither in use nor armed; otherwise
	 * load buffer B.
	 */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

static noinline void sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
			(DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

static void sa11x0_dma_tasklet(struct tasklet_struct *t)
{
	struct sa11x0_dma_dev *d = from_tasklet(d, t, task);
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

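/*
 * Return the current transfer position.  This relies on DBSA/DBSB
 * tracking the in-flight address as the engine consumes a buffer (an
 * assumption of the residue code below, not spelled out in this file),
 * picking the register of the buffer the engine is, or was most
 * recently, working on as judged from the BIU/STRTA/STRTB bits.
 */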
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

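/*
 * Report transfer status.  For a cookie still on the issued list the
 * residue is the descriptor's full size; for the descriptor currently
 * on hardware, walk its scatterlist and count everything at and beyond
 * the current transfer position.
 */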
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

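/*
 * Prepare a slave scatterlist transfer.  Segments longer than
 * DMA_MAX_SIZE are split into roughly equal word-aligned chunks rather
 * than one maximal chunk plus a tiny remainder: e.g. (illustrative
 * numbers) a 0x2000 byte segment becomes two 0x1000 byte sg entries,
 * since DIV_ROUND_UP(0x2000, 0x1ffc) = 2 and (0x2000 / 2) & ~3 = 0x1000.
 */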
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
				&c->vc, &addr);
			return NULL;
		}
	}

	txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

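/*
 * Prepare a cyclic (e.g. audio) transfer.  The buffer is laid out as
 * size/period periods, each split into sgperiod hardware-sized chunks;
 * txd->period is therefore measured in chunks, and sa11x0_dma_complete()
 * invokes vchan_cyclic_callback() once per completed period.
 */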
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

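/*
 * Configure the slave side of the channel.  Only the bus width (8 or
 * 16 bit), burst size (4 or 8) and device address are programmable;
 * the address is packed into DDAR with bits 31:28 kept in place and
 * bits 21:2 shifted up to bits 27:8.
 */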
static int sa11x0_dma_device_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
		&c->vc, &addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

static int sa11x0_dma_device_pause(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;

		p = c->phy;
		if (p) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);

	p = c->phy;
	if (p) {
		dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
		/* vchan is assigned to a pchan - stop the channel */
		writel(DCSR_RUN | DCSR_IE |
		       DCSR_STRTA | DCSR_DONEA |
		       DCSR_STRTB | DCSR_DONEB,
		       p->base + DMA_DCSR_C);

		if (p->txd_load) {
			if (p->txd_load != p->txd_done)
				list_add_tail(&p->txd_load->vd.node, &head);
			p->txd_load = NULL;
		}
		if (p->txd_done) {
			list_add_tail(&p->txd_done->vd.node, &head);
			p->txd_done = NULL;
		}
		c->phy = NULL;
		spin_lock(&d->lock);
		p->vchan = NULL;
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) struct sa11x0_dma_channel_desc {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) u32 ddar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) #define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) static const struct sa11x0_dma_channel_desc chan_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) CD(Ser0UDCTr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) CD(Ser0UDCRc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) CD(Ser1SDLCTr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) CD(Ser1SDLCRc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) CD(Ser1UARTTr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) CD(Ser1UARTRc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) CD(Ser2ICPTr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) CD(Ser2ICPRc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) CD(Ser3UARTTr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) CD(Ser3UARTRc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) CD(Ser4MCP0Tr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) CD(Ser4MCP0Rc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) CD(Ser4MCP1Tr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) CD(Ser4MCP1Rc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) CD(Ser4SSPTr, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) CD(Ser4SSPRc, DDAR_RW),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
static const struct dma_slave_map sa11x0_dma_map[] = {
	{ "sa11x0-ir", "tx", "Ser2ICPTr" },
	{ "sa11x0-ir", "rx", "Ser2ICPRc" },
	{ "sa11x0-ssp", "tx", "Ser4SSPTr" },
	{ "sa11x0-ssp", "rx", "Ser4SSPRc" },
};

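/* Match a virtual channel by its request line name. */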
static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	const char *p = param;

	return !strcmp(c->name, p);
}

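/*
 * Set up the dmaengine callbacks shared by this device, create one
 * virtual channel per entry in chan_desc[], and register the device
 * with the dmaengine core.
 */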
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_config = sa11x0_dma_device_config;
	dmadev->device_pause = sa11x0_dma_device_pause;
	dmadev->device_resume = sa11x0_dma_device_resume;
	dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

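/* Look up the nr'th interrupt of the platform device and claim it. */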
static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

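/*
 * Release the nr'th interrupt; tolerates a failed lookup so it can be
 * called unconditionally on the unwind paths.
 */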
static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq > 0)
		free_irq(irq, data);
}

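/* Destroy every virtual channel created by sa11x0_dma_init_dmadev(). */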
static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

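/*
 * Map the controller registers, reset each physical channel and claim
 * its interrupt, then describe the slave capabilities and register
 * with the dmaengine core.  Errors unwind in reverse order.
 */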
static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->slave.filter.fn = sa11x0_dma_filter_fn;
	d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
	d->slave.filter.map = sa11x0_dma_map;

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_setup(&d->task, sa11x0_dma_tasklet);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

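/* Undo probe: unregister from the dmaengine core and free everything. */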
static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

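/*
 * System sleep: stop each running channel and snapshot its state.
 * The buffer A/B pointer and count registers are saved in a
 * normalised order: when DCSR_BIU reports that buffer B was next to
 * be serviced, the A/B pairs and the STRTA/STRTB bits are saved
 * swapped, so resume reloads the in-flight buffer as buffer A.
 */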
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

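/*
 * Restore each channel that still has a descriptor in flight: reload
 * the device address, both buffer pointer/count pairs saved by
 * suspend, then set the saved DCSR bits to restart the transfer.
 */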
static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}

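/*
 * All handlers use the _noirq phase, so channel state is saved only
 * after client drivers have quiesced and restored before they resume.
 */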
static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name = "sa11x0-dma",
		.pm = &sa11x0_dma_pm_ops,
	},
	.probe = sa11x0_dma_probe,
	.remove = sa11x0_dma_remove,
};

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");