// SPDX-License-Identifier: GPL-2.0
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);
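/* slave_num can be overridden at module load time if a platform has more slaves */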

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

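	/*
	 * An empty queue means the channel is currently idle: besides being
	 * queued, this descriptor will also have to power the channel up.
	 */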
	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0) {
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
			pm_runtime_put(schan->dev);
		}

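		/* Wait for the asynchronous pm_runtime_get() above to complete */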
		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with schan->chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

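	/*
	 * Error paths: the slave bit was only set in the legacy
	 * (chan->private) path above, so only clear it in that case; the
	 * esetslave label is reached with slave != NULL by construction.
	 */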
edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement for
 * the "old" method, using the .private pointer.
 * You always have to pass a valid slave id as the argument, old drivers that
 * pass ERR_PTR(-EINVAL) as a filter parameter and set it up in dma_slave_config
 * need to be updated so we can remove the slave_id field from dma_slave_config.
 * If this filter is used, the slave driver, after calling
 * dma_request_channel(), will also have to call dmaengine_slave_config() with
 * .direction, and either .src_addr or .dst_addr set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers, using these
 * services, would have to provide their own filters, which first would check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */
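/*
 * Illustrative legacy (non-DT) client usage of this filter; SOME_SLAVE_ID and
 * fifo_addr are placeholders for platform-provided values:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_addr,
 *	};
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(uintptr_t)SOME_SLAVE_ID);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */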
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;
		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				fallthrough;
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

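	/*
	 * Cyclic descriptors were re-marked DESC_SUBMITTED above; put them
	 * back on the tail of ld_queue so the cyclic transfer keeps running.
	 */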
	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with schan->chan_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 * cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

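	/*
	 * Wrap the already-mapped source address in a one-entry scatterlist
	 * so the common shdma_prep_sg() path can be reused for MEMCPY.
	 */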
	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
							   struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * So far only .slave_id is used, but the slave drivers are
	 * encouraged to also set a transfer direction and an address.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * overriding the slave_id through dma_slave_config is deprecated,
	 * but possibly some out-of-tree drivers still do it.
	 */
	if (WARN_ON_ONCE(config->slave_id &&
			 config->slave_id != schan->real_slave_id))
		schan->real_slave_id = config->slave_id;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * We could lock this, but you shouldn't be configuring the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	 * channel while using it...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return shdma_setup_slave(schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) config->direction == DMA_DEV_TO_MEM ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) config->src_addr : config->dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
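
/*
 * Illustrative client-side usage (hypothetical addresses): a slave driver
 * normally reaches shdma_config() through dmaengine_slave_config() after
 * requesting the channel, e.g. for a device-to-memory transfer:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,	// hypothetical FIFO address
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */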
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static void shdma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct shdma_chan *schan = to_shdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) spin_lock_irq(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) if (schan->pm_state == SHDMA_PM_ESTABLISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) shdma_chan_xfer_ld_queue(schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) schan->pm_state = SHDMA_PM_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) spin_unlock_irq(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static enum dma_status shdma_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct shdma_chan *schan = to_shdma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) shdma_chan_ld_cleanup(schan, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) spin_lock_irqsave(&schan->chan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) status = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	 * If we don't find the cookie on the queue, it has been aborted and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	 * we have to report an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (status != DMA_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct shdma_desc *sdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) status = DMA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) list_for_each_entry(sdesc, &schan->ld_queue, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (sdesc->cookie == cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) status = DMA_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) spin_unlock_irqrestore(&schan->chan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
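
/*
 * Illustrative client-side usage: completion of a submitted descriptor is
 * polled through the generic dmaengine API, which calls the helper above.
 * The cookie would come from an earlier dmaengine_submit().
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_ERROR)
 *		pr_debug("descriptor was aborted\n");
 */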
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /* Called from error IRQ or NMI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bool shdma_reset(struct shdma_dev *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) const struct shdma_ops *ops = sdev->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct shdma_chan *schan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) unsigned int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* Reset all channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) shdma_for_each_chan(schan, sdev, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct shdma_desc *sdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) LIST_HEAD(dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (!schan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) spin_lock(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) /* Stop the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) ops->halt_channel(schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) list_splice_init(&schan->ld_queue, &dl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!list_empty(&dl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) pm_runtime_put(schan->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) schan->pm_state = SHDMA_PM_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) spin_unlock(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /* Complete all */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) list_for_each_entry(sdesc, &dl, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) sdesc->mark = DESC_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) dmaengine_desc_get_callback_invoke(tx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) spin_lock(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) list_splice(&dl, &schan->ld_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) spin_unlock(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) handled++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return !!handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) EXPORT_SYMBOL(shdma_reset);
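
/*
 * Illustrative glue-driver usage (hypothetical wrapper structure): a
 * controller driver would typically quiesce its hardware in the error
 * interrupt handler and then call shdma_reset() to abort everything queued:
 *
 *	static irqreturn_t my_err_irq(int irq, void *data)
 *	{
 *		struct my_dmae_device *mydev = data;	// hypothetical wrapper
 *
 *		// mask / clear the controller error condition here, then:
 *		return IRQ_RETVAL(shdma_reset(&mydev->shdma_dev));
 *	}
 */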
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static irqreturn_t chan_irq(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct shdma_chan *schan = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) const struct shdma_ops *ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) to_shdma_dev(schan->dma_chan.device)->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) irqreturn_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) spin_lock(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) spin_unlock(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static irqreturn_t chan_irqt(int irq, void *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct shdma_chan *schan = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) const struct shdma_ops *ops =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) to_shdma_dev(schan->dma_chan.device)->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct shdma_desc *sdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spin_lock_irq(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) list_for_each_entry(sdesc, &schan->ld_queue, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (sdesc->mark == DESC_SUBMITTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ops->desc_completed(schan, sdesc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) dev_dbg(schan->dev, "done #%d@%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) sdesc->async_tx.cookie, &sdesc->async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) sdesc->mark = DESC_COMPLETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) /* Next desc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) shdma_chan_xfer_ld_queue(schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) spin_unlock_irq(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) shdma_chan_ld_cleanup(schan, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) int shdma_request_irq(struct shdma_chan *schan, int irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) unsigned long flags, const char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) chan_irqt, flags, name, schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) schan->irq = ret < 0 ? ret : irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) EXPORT_SYMBOL(shdma_request_irq);
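
/*
 * Illustrative glue-driver usage (hypothetical platform device): the
 * per-channel IRQ is usually taken from platform resources and handed to
 * shdma_request_irq(), which wires up the chan_irq()/chan_irqt() pair above:
 *
 *	irq = platform_get_irq(pdev, id);
 *	if (irq < 0)
 *		return irq;
 *	ret = shdma_request_irq(schan, irq, IRQF_SHARED, dev_name(&pdev->dev));
 *	if (ret < 0)
 *		return ret;
 */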
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) void shdma_chan_probe(struct shdma_dev *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct shdma_chan *schan, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) schan->pm_state = SHDMA_PM_ESTABLISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /* reference struct dma_device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) schan->dma_chan.device = &sdev->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) dma_cookie_init(&schan->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) schan->dev = sdev->dma_dev.dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) schan->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (!schan->max_xfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) schan->max_xfer_len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) spin_lock_init(&schan->chan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) 	/* Init descriptor management lists */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) INIT_LIST_HEAD(&schan->ld_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) INIT_LIST_HEAD(&schan->ld_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) /* Add the channel to DMA device channel list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) list_add_tail(&schan->dma_chan.device_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) &sdev->dma_dev.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) sdev->schan[id] = schan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) EXPORT_SYMBOL(shdma_chan_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) void shdma_chan_remove(struct shdma_chan *schan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) list_del(&schan->dma_chan.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) EXPORT_SYMBOL(shdma_chan_remove);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) int shdma_init(struct device *dev, struct shdma_dev *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) int chan_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct dma_device *dma_dev = &sdev->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) 	 * Require all callbacks for now; they can trivially be made optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) 	 * later as required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (!sdev->ops ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) !sdev->desc_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) !sdev->ops->embedded_desc ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) !sdev->ops->start_xfer ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) !sdev->ops->setup_xfer ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) !sdev->ops->set_slave ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) !sdev->ops->desc_setup ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) !sdev->ops->slave_addr ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) !sdev->ops->channel_busy ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) !sdev->ops->halt_channel ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) !sdev->ops->desc_completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (!sdev->schan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) INIT_LIST_HEAD(&dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* Common and MEMCPY operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) dma_dev->device_alloc_chan_resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) = shdma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dma_dev->device_free_chan_resources = shdma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) dma_dev->device_tx_status = shdma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dma_dev->device_issue_pending = shdma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	/* Compulsory fields for DMA_SLAVE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) dma_dev->device_config = shdma_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) dma_dev->device_terminate_all = shdma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) dma_dev->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) EXPORT_SYMBOL(shdma_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) void shdma_cleanup(struct shdma_dev *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) kfree(sdev->schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) EXPORT_SYMBOL(shdma_cleanup);
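
/*
 * Illustrative glue-driver probe sequence (hypothetical structures and
 * NR_CHANNELS): a controller driver built on this library pairs
 * shdma_init()/shdma_chan_probe() at probe time with
 * shdma_chan_remove()/shdma_cleanup() at removal:
 *
 *	err = shdma_init(&pdev->dev, &mydev->shdma_dev, NR_CHANNELS);
 *	if (err < 0)
 *		return err;
 *	dma_cap_set(DMA_SLAVE, mydev->shdma_dev.dma_dev.cap_mask);
 *	for (i = 0; i < NR_CHANNELS; i++)
 *		shdma_chan_probe(&mydev->shdma_dev,
 *				 &mydev->chan[i].shdma_chan, i);
 *	err = dma_async_device_register(&mydev->shdma_dev.dma_dev);
 */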
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static int __init shdma_enter(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) sizeof(long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (!shdma_slave_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) module_init(shdma_enter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static void __exit shdma_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) kfree(shdma_slave_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) module_exit(shdma_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) MODULE_DESCRIPTION("SH-DMA driver base library");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");