// SPDX-License-Identifier: GPL-2.0-only
/*
 * copy offload engine support
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 */
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/async_tx.h>

/**
 * async_memcpy - attempt to copy memory with a dma engine.
 * @dest: destination page
 * @src: src page
 * @dest_offset: offset into 'dest' to start transaction
 * @src_offset: offset into 'src' to start transaction
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK
 */
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	     unsigned int src_offset, size_t len,
	     struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
						      &dest, 1, &src, 1, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

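	/*
	 * Hardware offload path: taken only when a DMA_MEMCPY channel was
	 * found, the unmap data could be allocated, and the engine accepts
	 * the requested offsets and length.
	 */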
	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
		unsigned long dma_prep_flags = 0;

		if (submit->cb_fn)
			dma_prep_flags |= DMA_PREP_INTERRUPT;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_prep_flags |= DMA_PREP_FENCE;

		unmap->to_cnt = 1;
		unmap->addr[0] = dma_map_page(device->dev, src, src_offset, len,
					      DMA_TO_DEVICE);
		unmap->from_cnt = 1;
		unmap->addr[1] = dma_map_page(device->dev, dest, dest_offset, len,
					      DMA_FROM_DEVICE);
		unmap->len = len;

		tx = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						    unmap->addr[0], len,
						    dma_prep_flags);
	}

	if (tx) {
		pr_debug("%s: (async) len: %zu\n", __func__, len);

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		void *dest_buf, *src_buf;
		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		dest_buf = kmap_atomic(dest) + dest_offset;
		src_buf = kmap_atomic(src) + src_offset;

		memcpy(dest_buf, src_buf, len);

		kunmap_atomic(src_buf);
		kunmap_atomic(dest_buf);

		async_tx_sync_epilog(submit);
	}

	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_memcpy);
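
/*
 * Example (illustrative sketch only, compiled out below): a caller wraps
 * async_memcpy() in an async_submit_ctl carrying its flags and completion
 * callback.  The helper names example_copy_done() and example_copy_page()
 * are hypothetical, and <linux/completion.h> is assumed to be available.
 */
#if 0
static void example_copy_done(void *param)
{
	complete(param);	/* signal the waiting submitter */
}

static void example_copy_page(struct page *dst, struct page *src)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct async_submit_ctl submit;

	/* ack the descriptor immediately; call example_copy_done when done */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL,
			  example_copy_done, &done, NULL);
	async_memcpy(dst, src, 0, 0, PAGE_SIZE, &submit);

	/* flush pending descriptors to the engine(s) and wait */
	async_tx_issue_pending_all();
	wait_for_completion(&done);
}
#endif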

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous memcpy api");
MODULE_LICENSE("GPL");