// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the TXx9 SoC DMA Controller
 *
 * Copyright (C) 2009 Atsushi Nemoto
 */
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

#include "dmaengine.h"
#include "txx9dmac.h"

static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct txx9dmac_chan, chan);
}

static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

static struct txx9dmac_cregs32 __iomem *__dma_regs32(
	const struct txx9dmac_chan *dc)
{
	return dc->ch_regs;
}

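/*
 * Per-channel register accessors.  The same ch_regs pointer is viewed
 * either as the 64-bit DMAC layout (txx9dmac_cregs) or the 32-bit one
 * (txx9dmac_cregs32); is_dmac64() decides which layout applies.  The
 * __raw_* accessors are used, presumably because these are native-endian
 * on-chip registers needing no byte swapping or extra barriers.
 */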
#define channel64_readq(dc, name) \
	__raw_readq(&(__dma_regs(dc)->name))
#define channel64_writeq(dc, name, val) \
	__raw_writeq((val), &(__dma_regs(dc)->name))
#define channel64_readl(dc, name) \
	__raw_readl(&(__dma_regs(dc)->name))
#define channel64_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs(dc)->name))

#define channel32_readl(dc, name) \
	__raw_readl(&(__dma_regs32(dc)->name))
#define channel32_writel(dc, name, val) \
	__raw_writel((val), &(__dma_regs32(dc)->name))

#define channel_readq(dc, name) channel64_readq(dc, name)
#define channel_writeq(dc, name, val) channel64_writeq(dc, name, val)
#define channel_readl(dc, name) \
	(is_dmac64(dc) ? \
	channel64_readl(dc, name) : channel32_readl(dc, name))
#define channel_writel(dc, name, val) \
	(is_dmac64(dc) ? \
	channel64_writel(dc, name, val) : channel32_writel(dc, name, val))

static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		return channel64_readq(dc, CHAR);
	else
		return channel64_readl(dc, CHAR);
}

static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64))
		channel64_writeq(dc, CHAR, val);
	else
		channel64_writel(dc, CHAR, val);
}

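/*
 * Zero out CHAR.  On 32-bit builds without 64-bit physical addressing
 * the register layout (see txx9dmac.h) declares CHAR as a 32-bit field
 * plus __pad_CHAR, so both halves are cleared separately.
 */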
static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
{
#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
	channel64_writel(dc, CHAR, 0);
	channel64_writel(dc, __pad_CHAR, 0);
#else
	channel64_writeq(dc, CHAR, 0);
#endif
}

static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		return channel64_read_CHAR(dc);
	else
		return channel32_readl(dc, CHAR);
}

static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
{
	if (is_dmac64(dc))
		channel64_write_CHAR(dc, val);
	else
		channel32_writel(dc, CHAR, val);
}

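/*
 * Controller-global register accessors, mirroring the per-channel
 * helpers above: the same ddev->regs pointer is interpreted as either
 * the 64-bit or the 32-bit global register layout.
 */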
static struct txx9dmac_regs __iomem *__txx9dmac_regs(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32(
	const struct txx9dmac_dev *ddev)
{
	return ddev->regs;
}

#define dma64_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs(ddev)->name))
#define dma64_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs(ddev)->name))

#define dma32_readl(ddev, name) \
	__raw_readl(&(__txx9dmac_regs32(ddev)->name))
#define dma32_writel(ddev, name, val) \
	__raw_writel((val), &(__txx9dmac_regs32(ddev)->name))

#define dma_readl(ddev, name) \
	(__is_dmac64(ddev) ? \
	dma64_readl(ddev, name) : dma32_readl(ddev, name))
#define dma_writel(ddev, name, val) \
	(__is_dmac64(ddev) ? \
	dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val))

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct txx9dmac_desc *
txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct txx9dmac_desc, txd);
}

static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc,
				 const struct txx9dmac_desc *desc)
{
	return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR;
}

static void desc_write_CHAR(const struct txx9dmac_chan *dc,
			    struct txx9dmac_desc *desc, dma_addr_t val)
{
	if (is_dmac64(dc))
		desc->hwdesc.CHAR = val;
	else
		desc->hwdesc32.CHAR = val;
}

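/* Maximum transfer count (CNTR value) per hardware descriptor: 64 MiB */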
#define TXX9_DMA_MAX_COUNT	0x04000000

#define TXX9_DMA_INITIAL_DESC_COUNT	64

static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.next,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc)
{
	return list_entry(dc->active_list.prev,
			  struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc)
{
	return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node);
}

static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc)
{
	if (!list_empty(&desc->tx_list))
		desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node);
	return desc;
}

static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx);

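/*
 * Allocate one software descriptor and DMA-map its embedded hardware
 * descriptor so the controller can fetch it once it is chained in.
 * The mapping is DMA_TO_DEVICE: the CPU writes the descriptor, the
 * controller only reads it.
 */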
static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc,
						 gfp_t flags)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;

	desc = kzalloc(sizeof(*desc), flags);
	if (!desc)
		return NULL;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dc->chan);
	desc->txd.tx_submit = txx9dmac_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc,
					ddev->descsize, DMA_TO_DEVICE);
	return desc;
}

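/*
 * Take a descriptor off the free list, skipping any the client has not
 * ACKed yet; fall back to allocating a fresh one.  Called from the prep
 * functions, hence GFP_ATOMIC.
 */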
static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *ret = NULL;
	unsigned int i = 0;

	spin_lock_bh(&dc->lock);
	list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_bh(&dc->lock);

	dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n",
		 i);
	if (!ret) {
		ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC);
		if (ret) {
			spin_lock_bh(&dc->lock);
			dc->descs_allocated++;
			spin_unlock_bh(&dc->lock);
		} else
			dev_err(chan2dev(&dc->chan),
				"not enough descriptors available\n");
	}
	return ret;
}

static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc,
				       struct txx9dmac_desc *desc)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dc->chan),
					child->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dc->chan),
				desc->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void txx9dmac_desc_put(struct txx9dmac_chan *dc,
			      struct txx9dmac_desc *desc)
{
	if (desc) {
		struct txx9dmac_desc *child;

		txx9dmac_sync_desc_for_cpu(dc, desc);

		spin_lock_bh(&dc->lock);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dc->chan),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &dc->free_list);
		dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n",
			 desc);
		list_add(&desc->desc_node, &dc->free_list);
		spin_unlock_bh(&dc->lock);
	}
}

/*----------------------------------------------------------------------*/

static void txx9dmac_dump_regs(struct txx9dmac_chan *dc)
{
	if (is_dmac64(dc))
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			(u64)channel64_read_CHAR(dc),
			channel64_readq(dc, SAR),
			channel64_readq(dc, DAR),
			channel64_readl(dc, CNTR),
			channel64_readl(dc, SAIR),
			channel64_readl(dc, DAIR),
			channel64_readl(dc, CCR),
			channel64_readl(dc, CSR));
	else
		dev_err(chan2dev(&dc->chan),
			" CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x"
			" SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n",
			channel32_readl(dc, CHAR),
			channel32_readl(dc, SAR),
			channel32_readl(dc, DAR),
			channel32_readl(dc, CNTR),
			channel32_readl(dc, SAIR),
			channel32_readl(dc, DAIR),
			channel32_readl(dc, CCR),
			channel32_readl(dc, CSR));
}

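/* Pulse the channel-reset bit, then clear every channel register. */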
static void txx9dmac_reset_chan(struct txx9dmac_chan *dc)
{
	channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST);
	if (is_dmac64(dc)) {
		channel64_clear_CHAR(dc);
		channel_writeq(dc, SAR, 0);
		channel_writeq(dc, DAR, 0);
	} else {
		channel_writel(dc, CHAR, 0);
		channel_writel(dc, SAR, 0);
		channel_writel(dc, DAR, 0);
	}
	channel_writel(dc, CNTR, 0);
	channel_writel(dc, SAIR, 0);
	channel_writel(dc, DAIR, 0);
	channel_writel(dc, CCR, 0);
}

/* Called with dc->lock held and bh disabled */
static void txx9dmac_dostart(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *first)
{
	struct txx9dmac_slave *ds = dc->chan.private;
	u32 sai, dai;

	dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n",
		 first->txd.cookie, first);
	/* ASSERT: channel is idle */
	if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
		dev_err(chan2dev(&dc->chan),
			"BUG: Attempted to start non-idle channel\n");
		txx9dmac_dump_regs(dc);
		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (is_dmac64(dc)) {
		channel64_writel(dc, CNTR, 0);
		channel64_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 8;
			dai = 8;
		}
		channel64_writel(dc, SAIR, sai);
		channel64_writel(dc, DAIR, dai);
		/* All 64-bit DMACs support SMPCHN */
		channel64_writel(dc, CCR, dc->ccr);
		/* Writing a non-zero value to CHAR will assert XFACT */
		channel64_write_CHAR(dc, first->txd.phys);
	} else {
		channel32_writel(dc, CNTR, 0);
		channel32_writel(dc, CSR, 0xffffffff);
		if (ds) {
			if (ds->tx_reg) {
				sai = ds->reg_width;
				dai = 0;
			} else {
				sai = 0;
				dai = ds->reg_width;
			}
		} else {
			sai = 4;
			dai = 4;
		}
		channel32_writel(dc, SAIR, sai);
		channel32_writel(dc, DAIR, dai);
		if (txx9_dma_have_SMPCHN()) {
			channel32_writel(dc, CCR, dc->ccr);
			/* Writing a non-zero value to CHAR will assert XFACT */
			channel32_writel(dc, CHAR, first->txd.phys);
		} else {
			channel32_writel(dc, CHAR, first->txd.phys);
			channel32_writel(dc, CCR, dc->ccr);
		}
	}
}

/*----------------------------------------------------------------------*/

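/*
 * Called with dc->lock held and bh disabled.  Completes the cookie,
 * returns the descriptor (and its children) to the free list, then
 * runs the client callback and any dependent transactions.
 */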
static void
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
			     struct txx9dmac_desc *desc)
{
	struct dmaengine_desc_callback cb;
	struct dma_async_tx_descriptor *txd = &desc->txd;

	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
		 txd->cookie, desc);

	dma_cookie_complete(txd);
	dmaengine_desc_get_callback(txd, &cb);

	txx9dmac_sync_desc_for_cpu(dc, desc);
	list_splice_init(&desc->tx_list, &dc->free_list);
	list_move(&desc->desc_node, &dc->free_list);

	dma_descriptor_unmap(txd);
	/*
	 * The API requires that no submissions are done from a
	 * callback, so we don't need to drop the lock here
	 */
	dmaengine_desc_callback_invoke(&cb, NULL);
	dma_run_dependencies(txd);
}

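/*
 * Move descriptors from dc->queue onto @list, linking each hardware
 * descriptor's CHAR to the next chain and flushing it to the device.
 * Stops early after a descriptor that wants a completion interrupt
 * when the channel cannot raise INTENT per descriptor, so the
 * chain-completion interrupt fires at that point.
 */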
static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list)
{
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *prev = NULL;

	BUG_ON(!list_empty(list));
	do {
		desc = txx9dmac_first_queued(dc);
		if (prev) {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
				prev->txd.phys, ddev->descsize,
				DMA_TO_DEVICE);
		}
		prev = txx9dmac_last_child(desc);
		list_move_tail(&desc->desc_node, list);
		/* Make chain-completion interrupt happen */
		if ((desc->txd.flags & DMA_PREP_INTERRUPT) &&
		    !txx9dmac_chan_INTENT(dc))
			break;
	} while (!list_empty(&dc->queue));
}

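/* Called with dc->lock held and bh disabled. */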
static void txx9dmac_complete_all(struct txx9dmac_chan *dc)
{
	struct txx9dmac_desc *desc, *_desc;
	LIST_HEAD(list);

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dc->active_list, &list);
	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		txx9dmac_descriptor_complete(dc, desc);
}

static void txx9dmac_dump_desc(struct txx9dmac_chan *dc,
			       struct txx9dmac_hwdesc *desc)
{
	if (is_dmac64(dc)) {
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#llx s%#llx d%#llx c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR,
			 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR);
#endif
	} else {
		struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc;
#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR);
#else
		dev_crit(chan2dev(&dc->chan),
			 " desc: ch%#x s%#x d%#x c%#x"
			 " si%#x di%#x cc%#x cs%#x\n",
			 d->CHAR, d->SAR, d->DAR, d->CNTR,
			 d->SAIR, d->DAIR, d->CCR, d->CSR);
#endif
	}
}

static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr)
{
	struct txx9dmac_desc *bad_desc;
	struct txx9dmac_desc *child;
	u32 errors;

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n");
	txx9dmac_dump_regs(dc);

	bad_desc = txx9dmac_first_active(dc);
	list_del_init(&bad_desc->desc_node);

	/* Clear all error flags and try to restart the controller */
	errors = csr & (TXX9_DMA_CSR_ABCHC |
			TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR |
			TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR);
	channel_writel(dc, CSR, errors);

	if (list_empty(&dc->active_list) && !list_empty(&dc->queue))
		txx9dmac_dequeue(dc, &dc->active_list);
	if (!list_empty(&dc->active_list))
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));

	dev_crit(chan2dev(&dc->chan),
		 "Bad descriptor submitted for DMA! (cookie: %d)\n",
		 bad_desc->txd.cookie);
	txx9dmac_dump_desc(dc, &bad_desc->hwdesc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		txx9dmac_dump_desc(dc, &child->hwdesc);
	/* Pretend the descriptor completed successfully */
	txx9dmac_descriptor_complete(dc, bad_desc);
}

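/*
 * Walk the active list and complete everything the hardware has moved
 * past: the channel's current CHAR identifies the chain in flight, so
 * any descriptor whose CHAR link does not match has already finished.
 */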
static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc)
{
	dma_addr_t chain;
	struct txx9dmac_desc *desc, *_desc;
	struct txx9dmac_desc *child;
	u32 csr;

	if (is_dmac64(dc)) {
		chain = channel64_read_CHAR(dc);
		csr = channel64_readl(dc, CSR);
		channel64_writel(dc, CSR, csr);
	} else {
		chain = channel32_readl(dc, CHAR);
		csr = channel32_readl(dc, CSR);
		channel32_writel(dc, CSR, csr);
	}
	/* For dynamic chain, we should look at XFACT instead of NCHNC */
	if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) {
		/* Everything we've submitted is done */
		txx9dmac_complete_all(dc);
		return;
	}
	if (!(csr & TXX9_DMA_CSR_CHNEN))
		chain = 0;	/* last descriptor of this chain */

	dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n",
		 (u64)chain);

	list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
		if (desc_read_CHAR(dc, desc) == chain) {
			/* This one is currently in progress */
			if (csr & TXX9_DMA_CSR_ABCHC)
				goto scan_done;
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (desc_read_CHAR(dc, child) == chain) {
				/* Currently in progress */
				if (csr & TXX9_DMA_CSR_ABCHC)
					goto scan_done;
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		txx9dmac_descriptor_complete(dc, desc);
	}
scan_done:
	if (csr & TXX9_DMA_CSR_ABCHC) {
		txx9dmac_handle_error(dc, csr);
		return;
	}

	dev_err(chan2dev(&dc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	txx9dmac_reset_chan(dc);

	if (!list_empty(&dc->queue)) {
		txx9dmac_dequeue(dc, &dc->active_list);
		txx9dmac_dostart(dc, txx9dmac_first_active(dc));
	}
}

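/*
 * Per-channel bottom half.  The hard irq handler below disables its
 * own irq line before scheduling us; re-enable it once the descriptor
 * scan is done.
 */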
static void txx9dmac_chan_tasklet(struct tasklet_struct *t)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	dc = from_tasklet(dc, t, tasklet);
	csr = channel_readl(dc, CSR);
	dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr);

	spin_lock(&dc->lock);
	if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
		   TXX9_DMA_CSR_NTRNFC))
		txx9dmac_scan_descriptors(dc);
	spin_unlock(&dc->lock);
	irq = dc->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_chan *dc = dev_id;

	dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n",
		 channel_readl(dc, CSR));

	tasklet_schedule(&dc->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

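/*
 * Controller-level bottom half for DMACs that share one irq across all
 * channels: the MCR bits tested below ((mcr >> (24 + i)) & 0x11) select
 * which channels need a descriptor scan.
 */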
static void txx9dmac_tasklet(struct tasklet_struct *t)
{
	int irq;
	u32 csr;
	struct txx9dmac_chan *dc;

	struct txx9dmac_dev *ddev = from_tasklet(ddev, t, tasklet);
	u32 mcr;
	int i;

	mcr = dma_readl(ddev, MCR);
	dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr);
	for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) {
		if ((mcr >> (24 + i)) & 0x11) {
			dc = ddev->chan[i];
			csr = channel_readl(dc, CSR);
			dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n",
				 csr);
			spin_lock(&dc->lock);
			if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC |
				   TXX9_DMA_CSR_NTRNFC))
				txx9dmac_scan_descriptors(dc);
			spin_unlock(&dc->lock);
		}
	}
	irq = ddev->irq;

	enable_irq(irq);
}

static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id)
{
	struct txx9dmac_dev *ddev = dev_id;

	dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n",
		 dma_readl(ddev, MCR));

	tasklet_schedule(&ddev->tasklet);
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

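/*
 * dmaengine tx_submit hook: just assign a cookie and park the
 * descriptor on the software queue; the hardware is only kicked later,
 * when the queue is dequeued onto the active list.
 */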
static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx);
	struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&dc->lock);
	cookie = dma_cookie_assign(tx);

	dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n",
		 desc->txd.cookie, desc);

	list_add_tail(&desc->desc_node, &dc->queue);
	spin_unlock_bh(&dc->lock);

	return cookie;
}

static struct dma_async_tx_descriptor *
txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
	struct txx9dmac_dev *ddev = dc->ddev;
	struct txx9dmac_desc *desc;
	struct txx9dmac_desc *first;
	struct txx9dmac_desc *prev;
	size_t xfer_count;
	size_t offset;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n",
		 (u64)dest, (u64)src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT);
		/*
		 * Workaround for ERT-TX49H2-033, ERT-TX49H3-020,
		 * ERT-TX49H4-016 (slightly conservative)
		 */
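		/*
		 * E.g. on a 64-bit DMAC a chunk of 0x1fa..0x1ff bytes is
		 * trimmed by 0x20, keeping the low byte of the count clear
		 * of the erratum range.
		 */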
		if (__is_dmac64(ddev)) {
			if (xfer_count > 0x100 &&
			    (xfer_count & 0xff) >= 0xfa &&
			    (xfer_count & 0xff) <= 0xff)
				xfer_count -= 0x20;
		} else {
			if (xfer_count > 0x80 &&
			    (xfer_count & 0x7f) >= 0x7e &&
			    (xfer_count & 0x7f) <= 0x7f)
				xfer_count -= 0x20;
		}

		desc = txx9dmac_desc_get(dc);
		if (!desc) {
			txx9dmac_desc_put(dc, first);
			return NULL;
		}

		if (__is_dmac64(ddev)) {
			desc->hwdesc.SAR = src + offset;
			desc->hwdesc.DAR = dest + offset;
			desc->hwdesc.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 8, 8,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		} else {
			desc->hwdesc32.SAR = src + offset;
			desc->hwdesc32.DAR = dest + offset;
			desc->hwdesc32.CNTR = xfer_count;
			txx9dmac_desc_set_nosimple(ddev, desc, 4, 4,
					dc->ccr | TXX9_DMA_CCR_XFACT);
		}

		/*
		 * The descriptors on tx_list are not reachable from
		 * the dc->queue list or dc->active_list after a
		 * submit.  If we put all descriptors on active_list,
		 * invoking the callback on completion would be more
		 * complex.
		 */
		if (!first) {
			first = desc;
		} else {
			desc_write_CHAR(dc, prev, desc->txd.phys);
			dma_sync_single_for_device(chan2parent(&dc->chan),
					prev->txd.phys, ddev->descsize,
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	/* Trigger interrupt after last block */
	if (flags & DMA_PREP_INTERRUPT)
		txx9dmac_desc_set_INTENT(ddev, prev);

	desc_write_CHAR(dc, prev, 0);
	dma_sync_single_for_device(chan2parent(&dc->chan),
				   prev->txd.phys, ddev->descsize,
				   DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;
}

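/*
 * Slave (device) transfer: direction and the FIFO register address come
 * from the txx9dmac_slave data hung off chan->private, with one hardware
 * descriptor per scatterlist entry.
 */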
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) unsigned int sg_len, enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct txx9dmac_dev *ddev = dc->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct txx9dmac_slave *ds = chan->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct txx9dmac_desc *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct txx9dmac_desc *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
	dev_vdbg(chan2dev(chan), "prep_slave_sg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) BUG_ON(!ds || !ds->reg_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (ds->tx_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) BUG_ON(direction != DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) BUG_ON(direction != DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (unlikely(!sg_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) prev = first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) struct txx9dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) dma_addr_t mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) u32 sai, dai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) desc = txx9dmac_desc_get(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) txx9dmac_desc_put(dc, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) mem = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (__is_dmac64(ddev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) desc->hwdesc.SAR = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) desc->hwdesc.DAR = ds->tx_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) desc->hwdesc.SAR = ds->rx_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) desc->hwdesc.DAR = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) desc->hwdesc.CNTR = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) desc->hwdesc32.SAR = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) desc->hwdesc32.DAR = ds->tx_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) desc->hwdesc32.SAR = ds->rx_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) desc->hwdesc32.DAR = mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) desc->hwdesc32.CNTR = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) sai = ds->reg_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) dai = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) sai = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) dai = ds->reg_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) txx9dmac_desc_set_nosimple(ddev, desc, sai, dai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) dc->ccr | TXX9_DMA_CCR_XFACT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (!first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) desc_write_CHAR(dc, prev, desc->txd.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) dma_sync_single_for_device(chan2parent(&dc->chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) prev->txd.phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ddev->descsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) list_add_tail(&desc->desc_node, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) prev = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* Trigger interrupt after last block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (flags & DMA_PREP_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) txx9dmac_desc_set_INTENT(ddev, prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) desc_write_CHAR(dc, prev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) dma_sync_single_for_device(chan2parent(&dc->chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) prev->txd.phys, ddev->descsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) first->txd.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) first->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return &first->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
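/*
 * txx9dmac_terminate_all - abort all transfers on the channel: reset the
 * hardware, then run completion on every queued and active descriptor so
 * that they are returned to the free list.
 */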
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) static int txx9dmac_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct txx9dmac_desc *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) dev_vdbg(chan2dev(chan), "terminate_all\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) spin_lock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) txx9dmac_reset_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* active_list entries will end up before queued entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) list_splice_init(&dc->queue, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) list_splice_init(&dc->active_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) spin_unlock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* Flush all pending and queued descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) list_for_each_entry_safe(desc, _desc, &list, desc_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) txx9dmac_descriptor_complete(dc, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
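/*
 * txx9dmac_tx_status - report the status of a cookie.  If it has not
 * completed yet, scan the descriptor lists once to reap anything the
 * hardware has already finished, then check again.
 */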
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static enum dma_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) spin_lock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) txx9dmac_scan_descriptors(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) spin_unlock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
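/*
 * txx9dmac_chain_dynamic - append queued work to a running chain.
 * Called with dc->lock held.  The first queued descriptor is linked
 * after the last child of @prev; if the channel has meanwhile stopped
 * exactly at @prev, it is restarted from the newly linked descriptor.
 */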
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct txx9dmac_desc *prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct txx9dmac_dev *ddev = dc->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct txx9dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) prev = txx9dmac_last_child(prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) txx9dmac_dequeue(dc, &list);
	desc = list_first_entry(&list, struct txx9dmac_desc, desc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) desc_write_CHAR(dc, prev, desc->txd.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) dma_sync_single_for_device(chan2parent(&dc->chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) prev->txd.phys, ddev->descsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) channel_read_CHAR(dc) == prev->txd.phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) /* Restart chain DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) channel_write_CHAR(dc, desc->txd.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) list_splice_tail(&list, &dc->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
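/*
 * txx9dmac_issue_pending - reap finished descriptors, then start the
 * next queued descriptor if the channel is idle.  On simple-chain
 * (SMPCHN) hardware a queued descriptor may instead be chained onto
 * the still-running transfer when it is safe to do so.
 */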
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) static void txx9dmac_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) spin_lock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!list_empty(&dc->active_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) txx9dmac_scan_descriptors(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (!list_empty(&dc->queue)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (list_empty(&dc->active_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) txx9dmac_dequeue(dc, &dc->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) txx9dmac_dostart(dc, txx9dmac_first_active(dc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) } else if (txx9_dma_have_SMPCHN()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct txx9dmac_desc *prev = txx9dmac_last_active(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (!(prev->txd.flags & DMA_PREP_INTERRUPT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) txx9dmac_chan_INTENT(dc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) txx9dmac_chain_dynamic(dc, prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) spin_unlock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
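/*
 * txx9dmac_alloc_chan_resources - program the channel control value and
 * preallocate descriptors.  A memcpy channel must not carry slave data
 * in chan->private, while a slave channel must supply exactly one of
 * tx_reg and rx_reg.  Returns the number of descriptors available or a
 * negative errno.
 */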
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) static int txx9dmac_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) struct txx9dmac_slave *ds = chan->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct txx9dmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /* ASSERT: channel is idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) dma_cookie_init(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) txx9dmac_chan_set_SMPCHN(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) dc->ccr |= TXX9_DMA_CCR_INTENC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) if (chan->device->device_prep_dma_memcpy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (ds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) dc->ccr |= TXX9_DMA_CCR_XFSZ_X8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (!ds ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) dc->ccr |= TXX9_DMA_CCR_EXTRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) txx9dmac_chan_set_INTENT(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) spin_lock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) i = dc->descs_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) spin_unlock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) desc = txx9dmac_desc_alloc(dc, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dev_info(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) "only allocated %d descriptors\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) spin_lock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) txx9dmac_desc_put(dc, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) spin_lock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) i = ++dc->descs_allocated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) spin_unlock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) "alloc_chan_resources allocated %d descriptors\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
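/*
 * txx9dmac_free_chan_resources - release all descriptors.  The channel
 * must already be idle: both lists empty and no transfer in flight.
 */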
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) static void txx9dmac_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct txx9dmac_dev *ddev = dc->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct txx9dmac_desc *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) dc->descs_allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /* ASSERT: channel is idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) BUG_ON(!list_empty(&dc->active_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) BUG_ON(!list_empty(&dc->queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) spin_lock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) list_splice_init(&dc->free_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) dc->descs_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) spin_unlock_bh(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) list_for_each_entry_safe(desc, _desc, &list, desc_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) dma_unmap_single(chan2parent(chan), desc->txd.phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) ddev->descsize, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /*----------------------------------------------------------------------*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
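/* Disable the whole controller by clearing the master control register. */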
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static void txx9dmac_off(struct txx9dmac_dev *ddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) dma_writel(ddev, MCR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
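/*
 * txx9dmac_chan_probe - probe a single DMA channel.  The channel's
 * platform data points at its parent controller device, and the channel
 * index is derived from pdev->id.  The channel selected by the
 * controller's memcpy_chan becomes a public memcpy channel; all others
 * are registered as private slave channels.  A per-channel IRQ is
 * requested only when the controller has no shared IRQ of its own.
 */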
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static int __init txx9dmac_chan_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct txx9dmac_chan_platform_data *cpdata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct platform_device *dmac_dev = cpdata->dmac_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct txx9dmac_platform_data *pdata = dev_get_platdata(&dmac_dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct txx9dmac_chan *dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) if (!dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) dc->dma.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) dc->dma.device_terminate_all = txx9dmac_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) dc->dma.device_tx_status = txx9dmac_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) dc->dma.device_issue_pending = txx9dmac_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (pdata && pdata->memcpy_chan == ch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) dma_cap_set(DMA_SLAVE, dc->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) INIT_LIST_HEAD(&dc->dma.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) dc->ddev = platform_get_drvdata(dmac_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (dc->ddev->irq < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) tasklet_setup(&dc->tasklet, txx9dmac_chan_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) dc->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) err = devm_request_irq(&pdev->dev, dc->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return err;
	} else {
		dc->irq = -1;
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) dc->ddev->chan[ch] = dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) dc->chan.device = &dc->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) list_add_tail(&dc->chan.device_node, &dc->chan.device->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) dma_cookie_init(&dc->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (is_dmac64(dc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) spin_lock_init(&dc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) INIT_LIST_HEAD(&dc->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) INIT_LIST_HEAD(&dc->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) INIT_LIST_HEAD(&dc->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) txx9dmac_reset_chan(dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) platform_set_drvdata(pdev, dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) err = dma_async_device_register(&dc->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) dc->dma.dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static int txx9dmac_chan_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
	struct txx9dmac_chan *dc = platform_get_drvdata(pdev);

	dma_async_device_unregister(&dc->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (dc->irq >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) devm_free_irq(&pdev->dev, dc->irq, dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) tasklet_kill(&dc->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
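/*
 * txx9dmac_probe - probe the controller itself: map the registers,
 * select the 32- or 64-bit descriptor layout, force DMA off, hook up
 * the shared IRQ if one exists, and program the master control
 * register.
 */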
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static int __init txx9dmac_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct txx9dmac_platform_data *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct resource *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) struct txx9dmac_dev *ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) u32 mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (!io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (!ddev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) dev_name(&pdev->dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (!ddev->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return -ENOMEM;
	ddev->have_64bit_regs = pdata ? pdata->have_64bit_regs : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (__is_dmac64(ddev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) ddev->descsize = sizeof(struct txx9dmac_hwdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ddev->descsize = sizeof(struct txx9dmac_hwdesc32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* force dma off, just in case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) txx9dmac_off(ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ddev->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (ddev->irq >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) tasklet_setup(&ddev->tasklet, txx9dmac_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) err = devm_request_irq(&pdev->dev, ddev->irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (pdata && pdata->memcpy_chan >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) dma_writel(ddev, MCR, mcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) platform_set_drvdata(pdev, ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static int txx9dmac_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) txx9dmac_off(ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (ddev->irq >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) devm_free_irq(&pdev->dev, ddev->irq, ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) tasklet_kill(&ddev->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) static void txx9dmac_shutdown(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct txx9dmac_dev *ddev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) txx9dmac_off(ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
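/*
 * Power management: suspend simply switches the controller off, and
 * resume reprograms the master control register the same way probe
 * does.
 */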
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static int txx9dmac_suspend_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) txx9dmac_off(ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) static int txx9dmac_resume_noirq(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) struct txx9dmac_dev *ddev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct txx9dmac_platform_data *pdata = dev_get_platdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) u32 mcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) mcr = TXX9_DMA_MCR_MSTEN | MCR_LE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (pdata && pdata->memcpy_chan >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan);
	dma_writel(ddev, MCR, mcr);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) static const struct dev_pm_ops txx9dmac_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) .suspend_noirq = txx9dmac_suspend_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) .resume_noirq = txx9dmac_resume_noirq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static struct platform_driver txx9dmac_chan_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) .remove = txx9dmac_chan_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) .name = "txx9dmac-chan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static struct platform_driver txx9dmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) .remove = txx9dmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) .shutdown = txx9dmac_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) .driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) .name = "txx9dmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) .pm = &txx9dmac_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
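/*
 * The controller driver must be registered before the channel driver:
 * each channel probe looks up its parent controller's drvdata.
 */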
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static int __init txx9dmac_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) rc = platform_driver_probe(&txx9dmac_chan_driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) txx9dmac_chan_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) platform_driver_unregister(&txx9dmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) module_init(txx9dmac_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) static void __exit txx9dmac_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) platform_driver_unregister(&txx9dmac_chan_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) platform_driver_unregister(&txx9dmac_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) module_exit(txx9dmac_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) MODULE_DESCRIPTION("TXx9 DMA Controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) MODULE_ALIAS("platform:txx9dmac");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) MODULE_ALIAS("platform:txx9dmac-chan");