// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/scatterwalk.h>

#include "dma.h"
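/*
 * qce_dma_request() - request the two DMA channels and allocate the shared
 * result buffer.
 *
 * The "rx" and "tx" channel names are resolved by dma_request_chan() from
 * what the firmware publishes (typically the dma-names device tree
 * property). One allocation of QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ bytes
 * backs both result_buf and ignore_buf: ignore_buf points just past the
 * result area and serves as a scratch destination for engine output the
 * driver discards, so only result_buf is ever passed to kfree().
 */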
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

	return 0;
error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}

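/*
 * qce_dma_release() - undo qce_dma_request(): release both channels and
 * free the combined result/ignore buffer. Since ignore_buf aliases into
 * the same allocation, only result_buf is freed.
 */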
void qce_dma_release(struct qce_dma_data *dma)
{
	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}

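/*
 * qce_sgtable_add() - append entries from @new_sgl into the first unused
 * slots of @sgt, copying at most @max_len bytes in total.
 *
 * The destination table must have spare trailing entries whose pages are
 * still NULL; if none are free, ERR_PTR(-EINVAL) is returned. Entries are
 * copied by page/offset/length reference only (no data is moved), the
 * entry that exhausts @max_len is truncated to fit, and a pointer to the
 * last slot written is returned (NULL if nothing was copied).
 */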
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
		unsigned int max_len)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
	unsigned int new_len;

	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg && max_len) {
		new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
		sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
		max_len -= new_len;
	}

	return sg_last;
}

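/*
 * qce_dma_prep_sg() - prepare and submit one slave-sg transaction on
 * @chan. The descriptor is submitted but not issued; the caller starts it
 * later via qce_dma_issue_pending(). Returns 0 on success or a negative
 * errno (descriptor preparation failures are reported as -EINVAL).
 */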
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

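/*
 * qce_dma_prep_sgs() - queue both halves of a crypto operation. The
 * channel names follow the crypto engine's point of view: "rx" carries
 * input data into the engine (DMA_MEM_TO_DEV) and "tx" carries results
 * back out (DMA_DEV_TO_MEM). The completion callback is attached only to
 * the tx descriptor, since the arrival of results implies the whole
 * request has finished.
 */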
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_chan *rxchan = dma->rxchan;
	struct dma_chan *txchan = dma->txchan;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	int ret;

	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
			      NULL, NULL);
	if (ret)
		return ret;

	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
			       cb, cb_param);
}

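/* Start execution of all transactions already submitted on both channels. */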
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}

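/*
 * qce_dma_terminate_all() - abort all in-flight transfers on both
 * channels, e.g. on timeout or teardown. Returns the rx channel's error,
 * if any, otherwise the result of terminating the tx channel. (Note that
 * dmaengine_terminate_all() is the legacy helper; new dmaengine client
 * code generally prefers dmaengine_terminate_sync().)
 */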
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);
	return ret ?: dmaengine_terminate_all(dma->txchan);
}