// SPDX-License-Identifier: GPL-2.0-only
/*
 * timb_dma.c - Timberdale FPGA DMA driver
 * Copyright (c) 2010 Intel Corporation
 */

/* Supports:
 * Timberdale FPGA DMA engine
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/timb_dma.h>

#include "dmaengine.h"

#define DRIVER_NAME "timb-dma"

/* Global DMA registers */
#define TIMBDMA_ACR		0x34
#define TIMBDMA_32BIT_ADDR	0x01

#define TIMBDMA_ISR		0x080000
#define TIMBDMA_IPR		0x080004
#define TIMBDMA_IER		0x080008

/* Channel specific registers */
/* RX instance base addresses are 0x00, 0x40, 0x80 ...
 * TX instance base addresses are 0x18, 0x58, 0x98 ...
 */
#define TIMBDMA_INSTANCE_OFFSET		0x40
#define TIMBDMA_INSTANCE_TX_OFFSET	0x18
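
/*
 * A channel's register window follows from its index: each pair of
 * channels shares one instance, the even index is the RX half at the
 * instance base and the odd index is the TX half at base +
 * TIMBDMA_INSTANCE_TX_OFFSET, i.e.
 *	membase + (i / 2) * TIMBDMA_INSTANCE_OFFSET
 *		+ (rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET)
 * (see the channel setup in td_probe() below).
 */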

/* RX registers, relative to the instance base */
#define TIMBDMA_OFFS_RX_DHAR	0x00
#define TIMBDMA_OFFS_RX_DLAR	0x04
#define TIMBDMA_OFFS_RX_LR	0x0C
#define TIMBDMA_OFFS_RX_BLR	0x10
#define TIMBDMA_OFFS_RX_ER	0x14
#define TIMBDMA_RX_EN		0x01
/* Bytes per row, a video-specific register
 * placed after the TX registers...
 */
#define TIMBDMA_OFFS_RX_BPRR	0x30

/* TX registers, relative to the instance base */
#define TIMBDMA_OFFS_TX_DHAR	0x00
#define TIMBDMA_OFFS_TX_DLAR	0x04
#define TIMBDMA_OFFS_TX_BLR	0x0C
#define TIMBDMA_OFFS_TX_LR	0x14

#define TIMB_DMA_DESC_SIZE	8

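/*
 * One software descriptor: desc_list is the CPU-side buffer holding
 * desc_elems hardware elements of TIMB_DMA_DESC_SIZE bytes each; it is
 * DMA-mapped to txd.phys and handed to the controller as a whole.
 */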
struct timb_dma_desc {
	struct list_head		desc_node;
	struct dma_async_tx_descriptor	txd;
	u8				*desc_list;
	unsigned int			desc_list_len;
	bool				interrupt;
};

struct timb_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	spinlock_t		lock; /* Used to protect data structures,
					especially the lists and descriptors,
					from races between the tasklet and calls
					from above */
	bool			ongoing;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		bytes_per_line;
	enum dma_transfer_direction	direction;
	unsigned int		descs; /* Descriptors to allocate */
	unsigned int		desc_elems; /* number of elems per descriptor */
};

struct timb_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct tasklet_struct	tasklet;
	struct timb_dma_chan	channels[];
};

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct device *chan2dmadev(struct dma_chan *chan)
{
	return chan2dev(chan)->parent->parent;
}

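/*
 * Walk back from a channel to its containing struct timb_dma: the
 * channels[] flexible array sits at the end of struct timb_dma, so
 * subtracting chan_id array elements plus the struct header from the
 * channel pointer yields the device.
 */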
static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;

	return (struct timb_dma *)((u8 *)td_chan -
		id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma));
}

/* Must be called with the spinlock held */
static void __td_enable_chan_irq(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 ier;

	/* enable interrupt for this channel */
	ier = ioread32(td->membase + TIMBDMA_IER);
	ier |= 1 << id;
	dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id,
		ier);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}

/* Should be called with the spinlock held */
static bool __td_dma_done_ack(struct timb_dma_chan *td_chan)
{
	int id = td_chan->chan.chan_id;
	struct timb_dma *td = tdchantotd(td_chan);
	u32 isr;
	bool done = false;

	dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td);

	isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id);
	if (isr) {
		iowrite32(isr, td->membase + TIMBDMA_ISR);
		done = true;
	}

	return done;
}

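/*
 * Fill one 8-byte hardware element. Layout, as inferred from the byte
 * stores below: bytes 4-7 hold the bus address (little-endian), bytes
 * 2-3 the length in bytes (little-endian, word aligned, at most
 * USHRT_MAX), byte 1 is reserved and byte 0 carries the control flags.
 */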
static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
	struct scatterlist *sg, bool last)
{
	if (sg_dma_len(sg) > USHRT_MAX) {
		dev_err(chan2dev(&td_chan->chan), "Too big sg element\n");
		return -EINVAL;
	}

	/* length must be word aligned */
	if (sg_dma_len(sg) % sizeof(u32)) {
		dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n",
			sg_dma_len(sg));
		return -EINVAL;
	}

	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
		dma_desc, (unsigned long long)sg_dma_address(sg));

	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
	dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff;
	dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff;

	dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff;
	dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff;

	dma_desc[1] = 0x00;
	dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* transfer + valid; 0x02 marks the last element */

	return 0;
}

/* Must be called with the spinlock held */
static void __td_start_dma(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	if (td_chan->ongoing) {
		dev_err(chan2dev(&td_chan->chan),
			"Transfer already ongoing\n");
		return;
	}

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan),
		"td_chan: %p, chan: %d, membase: %p\n",
		td_chan, td_chan->chan.chan_id, td_chan->membase);

	if (td_chan->direction == DMA_DEV_TO_MEM) {
		/* descriptor address */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_RX_DLAR);
		/* Bytes per line */
		iowrite32(td_chan->bytes_per_line, td_chan->membase +
			TIMBDMA_OFFS_RX_BPRR);
		/* enable RX */
		iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	} else {
		/* address high */
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR);
		iowrite32(td_desc->txd.phys, td_chan->membase +
			TIMBDMA_OFFS_TX_DLAR);
	}

	td_chan->ongoing = true;

	if (td_desc->interrupt)
		__td_enable_chan_irq(td_chan);
}

static void __td_finish(struct timb_dma_chan *td_chan)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd;
	struct timb_dma_desc *td_desc;

	/* can happen if the descriptor is canceled */
	if (list_empty(&td_chan->active_list))
		return;

	td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc,
		desc_node);
	txd = &td_desc->txd;

	dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n",
		txd->cookie);

	/* make sure to stop the transfer */
	if (td_chan->direction == DMA_DEV_TO_MEM)
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
	/* Currently no support for stopping DMA transfers
	else
		iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR);
	*/
	dma_cookie_complete(txd);
	td_chan->ongoing = false;

	dmaengine_desc_get_callback(txd, &cb);

	list_move(&td_desc->desc_node, &td_chan->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
}

static u32 __td_ier_mask(struct timb_dma *td)
{
	int i;
	u32 ret = 0;

	for (i = 0; i < td->dma.chancnt; i++) {
		struct timb_dma_chan *td_chan = td->channels + i;

		if (td_chan->ongoing) {
			struct timb_dma_desc *td_desc =
				list_entry(td_chan->active_list.next,
					struct timb_dma_desc, desc_node);
			if (td_desc->interrupt)
				ret |= 1 << i;
		}
	}

	return ret;
}

static void __td_start_next(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc;

	BUG_ON(list_empty(&td_chan->queue));
	BUG_ON(td_chan->ongoing);

	td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc,
		desc_node);

	dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n",
		__func__, td_desc->txd.cookie);

	list_move(&td_desc->desc_node, &td_chan->active_list);
	__td_start_dma(td_chan);
}

static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc,
		txd);
	struct timb_dma_chan *td_chan = container_of(txd->chan,
		struct timb_dma_chan, chan);
	dma_cookie_t cookie;

	spin_lock_bh(&td_chan->lock);
	cookie = dma_cookie_assign(txd);

	if (list_empty(&td_chan->active_list)) {
		dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__,
			txd->cookie);
		list_add_tail(&td_desc->desc_node, &td_chan->active_list);
		__td_start_dma(td_chan);
	} else {
		dev_dbg(chan2dev(txd->chan), "%s: queued %u\n", __func__,
			txd->cookie);

		list_add_tail(&td_desc->desc_node, &td_chan->queue);
	}

	spin_unlock_bh(&td_chan->lock);

	return cookie;
}

static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
{
	struct dma_chan *chan = &td_chan->chan;
	struct timb_dma_desc *td_desc;
	int err;

	td_desc = kzalloc(sizeof(*td_desc), GFP_KERNEL);
	if (!td_desc)
		goto out;

	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;

	td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL);
	if (!td_desc->desc_list)
		goto err;

	dma_async_tx_descriptor_init(&td_desc->txd, chan);
	td_desc->txd.tx_submit = td_tx_submit;
	td_desc->txd.flags = DMA_CTRL_ACK;

	td_desc->txd.phys = dma_map_single(chan2dmadev(chan),
		td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE);

	err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys);
	if (err) {
		dev_err(chan2dev(chan), "DMA mapping error: %d\n", err);
		goto err;
	}

	return td_desc;
err:
	kfree(td_desc->desc_list);
	kfree(td_desc);
out:
	return NULL;
}

static void td_free_desc(struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc);
	dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	kfree(td_desc->desc_list);
	kfree(td_desc);
}

static void td_desc_put(struct timb_dma_chan *td_chan,
	struct timb_dma_desc *td_desc)
{
	dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc);

	spin_lock_bh(&td_chan->lock);
	list_add(&td_desc->desc_node, &td_chan->free_list);
	spin_unlock_bh(&td_chan->lock);
}

static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan)
{
	struct timb_dma_desc *td_desc, *_td_desc;
	struct timb_dma_desc *ret = NULL;

	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list,
			desc_node) {
		if (async_tx_test_ack(&td_desc->txd)) {
			list_del(&td_desc->desc_node);
			ret = td_desc;
			break;
		}
		dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n",
			td_desc);
	}
	spin_unlock_bh(&td_chan->lock);

	return ret;
}

static int td_alloc_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	int i;

	dev_dbg(chan2dev(chan), "%s: entry\n", __func__);

	BUG_ON(!list_empty(&td_chan->free_list));
	for (i = 0; i < td_chan->descs; i++) {
		struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan);

		if (!td_desc) {
			if (i)
				break;
			dev_err(chan2dev(chan),
				"Couldn't allocate any descriptors\n");
			return -ENOMEM;
		}

		td_desc_put(td_chan, td_desc);
	}

	spin_lock_bh(&td_chan->lock);
	dma_cookie_init(chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_free_chan_resources(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* check that all descriptors are free */
	BUG_ON(!list_empty(&td_chan->active_list));
	BUG_ON(!list_empty(&td_chan->queue));

	spin_lock_bh(&td_chan->lock);
	list_splice_init(&td_chan->free_list, &list);
	spin_unlock_bh(&td_chan->lock);

	list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__,
			td_desc);
		td_free_desc(td_desc);
	}
}

static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	enum dma_status ret;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	ret = dma_cookie_status(chan, cookie, txstate);

	dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret);

	return ret;
}

static void td_issue_pending(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
	spin_lock_bh(&td_chan->lock);

	/* acknowledge and complete an ongoing transfer if it is done */
	if (!list_empty(&td_chan->active_list)) {
		if (__td_dma_done_ack(td_chan))
			__td_finish(td_chan);
	}

	if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue))
		__td_start_next(td_chan);

	spin_unlock_bh(&td_chan->lock);
}

static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc;
	struct scatterlist *sg;
	unsigned int i;
	unsigned int desc_usage = 0;

	if (!sgl || !sg_len) {
		dev_err(chan2dev(chan), "%s: No SG list\n", __func__);
		return NULL;
	}

	/* even channels are for RX, odd for TX */
	if (td_chan->direction != direction) {
		dev_err(chan2dev(chan),
			"Requesting channel in wrong direction\n");
		return NULL;
	}

	td_desc = td_desc_get(td_chan);
	if (!td_desc) {
		dev_err(chan2dev(chan), "Not enough descriptors available\n");
		return NULL;
	}

	td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;

	for_each_sg(sgl, sg, sg_len, i) {
		int err;

		if (desc_usage > td_desc->desc_list_len) {
			dev_err(chan2dev(chan), "No descriptor space\n");
			/* put the descriptor back so it isn't leaked */
			td_desc_put(td_chan, td_desc);
			return NULL;
		}

		err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg,
			i == (sg_len - 1));
		if (err) {
			dev_err(chan2dev(chan), "Failed to update desc: %d\n",
				err);
			td_desc_put(td_chan, td_desc);
			return NULL;
		}
		desc_usage += TIMB_DMA_DESC_SIZE;
	}

	dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
		td_desc->desc_list_len, DMA_TO_DEVICE);

	return &td_desc->txd;
}
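
/*
 * Illustrative only (not part of this driver): a client drives the
 * hooks above through the generic dmaengine slave API, roughly:
 *
 *	chan = dma_request_channel(mask, filter_fn, filter_param);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				      DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	txd->callback = done_fn;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * filter_fn, filter_param and done_fn are placeholders the client
 * supplies.
 */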

static int td_terminate_all(struct dma_chan *chan)
{
	struct timb_dma_chan *td_chan =
		container_of(chan, struct timb_dma_chan, chan);
	struct timb_dma_desc *td_desc, *_td_desc;

	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);

	/* first the easy part, put the queue into the free list */
	spin_lock_bh(&td_chan->lock);
	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
			desc_node)
		list_move(&td_desc->desc_node, &td_chan->free_list);

	/* now tear down the running transfer */
	__td_finish(td_chan);
	spin_unlock_bh(&td_chan->lock);

	return 0;
}

static void td_tasklet(struct tasklet_struct *t)
{
	struct timb_dma *td = from_tasklet(td, t, tasklet);
	u32 isr;
	u32 ipr;
	u32 ier;
	int i;

	isr = ioread32(td->membase + TIMBDMA_ISR);
	ipr = isr & __td_ier_mask(td);

	/* ack the interrupts */
	iowrite32(ipr, td->membase + TIMBDMA_ISR);

	for (i = 0; i < td->dma.chancnt; i++) {
		if (ipr & (1 << i)) {
			struct timb_dma_chan *td_chan = td->channels + i;

			spin_lock(&td_chan->lock);
			__td_finish(td_chan);
			if (!list_empty(&td_chan->queue))
				__td_start_next(td_chan);
			spin_unlock(&td_chan->lock);
		}
	}

	ier = __td_ier_mask(td);
	iowrite32(ier, td->membase + TIMBDMA_IER);
}
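/*
 * Top half: mask all channel interrupts and defer to the tasklet, which
 * acks the pending bits, completes descriptors and restores the IER
 * mask.
 */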
static irqreturn_t td_irq(int irq, void *devid)
{
	struct timb_dma *td = devid;
	u32 ipr = ioread32(td->membase + TIMBDMA_IPR);

	if (!ipr)
		return IRQ_NONE;

	/* disable interrupts, will be re-enabled in tasklet */
	iowrite32(0, td->membase + TIMBDMA_IER);

	tasklet_schedule(&td->tasklet);

	return IRQ_HANDLED;
}

static int td_probe(struct platform_device *pdev)
{
	struct timb_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct timb_dma *td;
	struct resource *iomem;
	int irq;
	int err;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data\n");
		return -EINVAL;
	}

	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iomem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!request_mem_region(iomem->start, resource_size(iomem),
		DRIVER_NAME))
		return -EBUSY;

	td = kzalloc(struct_size(td, channels, pdata->nr_channels),
		GFP_KERNEL);
	if (!td) {
		err = -ENOMEM;
		goto err_release_region;
	}

	dev_dbg(&pdev->dev, "Allocated TD: %p\n", td);

	td->membase = ioremap(iomem->start, resource_size(iomem));
	if (!td->membase) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	/* 32bit addressing */
	iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR);

	/* disable and clear any interrupts */
	iowrite32(0x0, td->membase + TIMBDMA_IER);
	iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR);

	tasklet_setup(&td->tasklet, td_tasklet);

	err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_tasklet_kill;
	}

	td->dma.device_alloc_chan_resources = td_alloc_chan_resources;
	td->dma.device_free_chan_resources = td_free_chan_resources;
	td->dma.device_tx_status = td_tx_status;
	td->dma.device_issue_pending = td_issue_pending;

	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
	td->dma.device_prep_slave_sg = td_prep_slave_sg;
	td->dma.device_terminate_all = td_terminate_all;

	td->dma.dev = &pdev->dev;

	INIT_LIST_HEAD(&td->dma.channels);

	for (i = 0; i < pdata->nr_channels; i++) {
		struct timb_dma_chan *td_chan = &td->channels[i];
		struct timb_dma_platform_data_channel *pchan =
			pdata->channels + i;

		/* even channels are RX, odd are TX */
		if ((i % 2) == pchan->rx) {
			dev_err(&pdev->dev, "Wrong channel configuration\n");
			err = -EINVAL;
			goto err_free_irq;
		}

		td_chan->chan.device = &td->dma;
		dma_cookie_init(&td_chan->chan);
		spin_lock_init(&td_chan->lock);
		INIT_LIST_HEAD(&td_chan->active_list);
		INIT_LIST_HEAD(&td_chan->queue);
		INIT_LIST_HEAD(&td_chan->free_list);

		td_chan->descs = pchan->descriptors;
		td_chan->desc_elems = pchan->descriptor_elements;
		td_chan->bytes_per_line = pchan->bytes_per_line;
		td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
			DMA_MEM_TO_DEV;

		td_chan->membase = td->membase +
			(i / 2) * TIMBDMA_INSTANCE_OFFSET +
			(pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET);

		dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n",
			i, td_chan->membase);

		list_add_tail(&td_chan->chan.device_node, &td->dma.channels);
	}

	err = dma_async_device_register(&td->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register async device\n");
		goto err_free_irq;
	}

	platform_set_drvdata(pdev, td);

	dev_dbg(&pdev->dev, "Probe result: %d\n", err);
	return err;

err_free_irq:
	free_irq(irq, td);
err_tasklet_kill:
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
err_free_mem:
	kfree(td);
err_release_region:
	release_mem_region(iomem->start, resource_size(iomem));

	return err;
}
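
/*
 * Illustrative only: the MFD glue is expected to hand in platform data
 * of the types used above (struct timb_dma_platform_data and
 * struct timb_dma_platform_data_channel from <linux/timb_dma.h>),
 * one RX/TX pair per instance, e.g.
 *
 *	static struct timb_dma_platform_data example_pdata = {
 *		.nr_channels = 2,
 *		.channels = {
 *			{ .rx = true,  .descriptors = 2,
 *			  .descriptor_elements = 4 },
 *			{ .rx = false, .descriptors = 2,
 *			  .descriptor_elements = 4 },
 *		},
 *	};
 *
 * "example_pdata" and the counts are made-up values; bytes_per_line
 * only matters for a video RX channel (the BPRR register above).
 */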

static int td_remove(struct platform_device *pdev)
{
	struct timb_dma *td = platform_get_drvdata(pdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&td->dma);
	free_irq(irq, td);
	tasklet_kill(&td->tasklet);
	iounmap(td->membase);
	kfree(td);
	release_mem_region(iomem->start, resource_size(iomem));

	dev_dbg(&pdev->dev, "Removed...\n");
	return 0;
}

static struct platform_driver td_driver = {
	.driver = {
		.name	= DRIVER_NAME,
	},
	.probe	= td_probe,
	.remove	= td_remove,
};

module_platform_driver(td_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>");
MODULE_ALIAS("platform:" DRIVER_NAME);