// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

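/*
 * Derive a DMA address width from a DMA mask: a mask of all ones wraps
 * to zero when incremented and is reported as 64 bits wide; any other
 * mask is measured with fls64() on the incremented value.
 */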
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

static unsigned int dmaengine = 1;
module_param(dmaengine, uint, 0444);
MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");
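
/*
 * Both parameters are read-only at runtime (mode 0444), so they must be
 * set at load time. A hypothetical example, assuming the driver is
 * loaded as the usual "ccp" module (or built in, using the same prefix
 * on the kernel command line):
 *
 *   modprobe ccp dma_chan_attr=2 dmaengine=0
 *
 * which would force all channels public and skip dmaengine registration
 * entirely.
 */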

static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

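/* Unlink and free every DMA command queued on @list. */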
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

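/*
 * dmaengine device_free_chan_resources callback: drop every descriptor
 * still held by the channel, regardless of which list it is on.
 */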
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

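/*
 * Reclaim completed descriptors, but only those the client has already
 * acknowledged; unacked descriptors stay on the list so their status
 * can still be queried via ccp_tx_status().
 */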
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

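/* Tasklet body: deferred cleanup of the channel's complete list. */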
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

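/*
 * Move the first pending command of @desc to its active list and hand
 * it to the CCP. -EINPROGRESS and -EBUSY count as success here, since
 * the command callback will still be invoked once the CCP finishes.
 */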
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

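/* Free the command at the head of the descriptor's active list, if any. */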
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

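/*
 * Retire @desc to the complete list and return the next active
 * descriptor; called with the channel lock held.
 */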
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

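/*
 * Complete the current descriptor and walk the active list until a
 * descriptor with commands still pending is found. Each completed
 * descriptor has its cookie completed under the channel lock, while
 * its callback and dependencies are run with the lock dropped.
 */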
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

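/*
 * Splice the pending list onto the active list. Returns the first newly
 * activated descriptor only when the active list was empty beforehand,
 * i.e. when processing needs to be kicked off; called with the channel
 * lock held.
 */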
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
	       ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
	       : NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

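/*
 * Per-command completion callback: record any error on the descriptor,
 * then keep issuing commands (and completing descriptors) until a
 * submission is outstanding, the channel is paused, or the active list
 * drains. Reclaiming acked descriptors is deferred to the cleanup
 * tasklet.
 */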
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

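/*
 * dmaengine tx_submit hook: assign a cookie and move the descriptor
 * from the created list to the pending list.
 */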
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->entry);
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

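/*
 * Build a descriptor that copies @src_sg to @dst_sg by walking both
 * scatterlists in lockstep and emitting one no-op passthru command per
 * contiguous chunk (the CCP's passthru engine doubles as a memcpy
 * engine). The scatterlists must already be DMA mapped, since the
 * commands carry CCP_CMD_PASSTHRU_NO_DMA_MAP.
 */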
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

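/*
 * dmaengine device_tx_status hook: report DMA_PAUSED while the channel
 * is paused; otherwise fall back to the cookie state, refined by the
 * per-descriptor status if the descriptor is still on the complete list.
 */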
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/* TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/* TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/* TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

static void ccp_dma_release(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_chan *dma_chan;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;
		tasklet_kill(&chan->cleanup_tasklet);
		list_del_rcu(&dma_chan->device_node);
	}
}

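/*
 * Register one dmaengine channel per CCP command queue, backed by
 * per-device slab caches for commands and descriptors. Skipped entirely
 * when the "dmaengine" module parameter is zero.
 */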
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	if (!dmaengine)
		return 0;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 * dma_chan_attr=0x1: all channels private (override vdata)
	 * dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	ccp_dma_release(ccp);
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	if (!dmaengine)
		return;

	dma_async_device_unregister(dma_dev);
	ccp_dma_release(ccp);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}