Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

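drivers/dma/virt-dma.c — the virtual DMA channel helpers that DMAengine drivers in this tree build on: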
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

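/*
 * vchan_tx_submit - dmaengine tx_submit callback for virtual channels
 *
 * Assigns a cookie to the descriptor and moves it from the allocated
 * to the submitted list. Installed on each descriptor by vchan_tx_prep()
 * in virt-dma.h.
 */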
dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

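/*
 * Illustrative sketch (not part of this file): drivers do not call
 * vchan_tx_submit() directly. A prep callback wraps its hardware
 * descriptor around struct virt_dma_desc and returns it through
 * vchan_tx_prep() (virt-dma.h), which installs vchan_tx_submit() as
 * the descriptor's tx_submit hook. "foo_desc" and "foo_prep_dma_memcpy"
 * are hypothetical names.
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;	common bookkeeping
 *		...hardware-specific fields...
 *	};
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		...program dst/src/len into the hardware fields...
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 */
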
/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way is to clear the DMA_CTRL_REUSE flag and submit the transfer one
 * last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);

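/*
 * Illustrative sketch (not part of this file): a client that set
 * DMA_CTRL_REUSE on a transfer can release the descriptor through
 * dmaengine_desc_free() from <linux/dmaengine.h>, which reaches
 * vchan_tx_desc_free() above via tx->desc_free, wired up by
 * vchan_tx_prep().
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_REUSE);
 *	...submit and reuse the descriptor as often as needed...
 *	dmaengine_desc_free(tx);
 */

/*
 * vchan_find_desc - look up an issued descriptor by cookie; callers
 * are expected to hold vc->lock, since the issued list may otherwise
 * change under them.
 */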
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

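/*
 * Illustrative sketch (not part of this file): the usual consumer of
 * vchan_find_desc() is a driver's device_tx_status callback, which uses
 * it to report the residue of a still-issued transfer. "foo_tx_status"
 * and "foo_desc_residue" are hypothetical names.
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *		dma_cookie_t cookie, struct dma_tx_state *txstate)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct virt_dma_desc *vd;
 *		enum dma_status ret;
 *		unsigned long flags;
 *
 *		ret = dma_cookie_status(chan, cookie, txstate);
 *		if (ret == DMA_COMPLETE || !txstate)
 *			return ret;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		vd = vchan_find_desc(vc, cookie);
 *		if (vd)
 *			dma_set_residue(txstate, foo_desc_residue(vd));
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		return ret;
 *	}
 */
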
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(struct tasklet_struct *t)
{
	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}

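/*
 * Illustrative sketch (not part of this file): drivers feed this
 * tasklet from their interrupt handler via vchan_cookie_complete()
 * (virt-dma.h), called with vc->lock held once the hardware finishes a
 * descriptor that was taken off the issued list when it was started.
 * "foo_irq" and struct "foo_chan" are hypothetical names.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_chan *c = data;
 *
 *		spin_lock(&c->vc.lock);
 *		if (c->active) {
 *			vchan_cookie_complete(&c->active->vd);
 *			c->active = NULL;
 *		}
 *		spin_unlock(&c->vc.lock);
 *
 *		return IRQ_HANDLED;
 *	}
 */
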
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		list_del(&vd->node);
		vchan_vdesc_fini(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);

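/*
 * Illustrative sketch (not part of this file): the canonical caller of
 * vchan_dma_desc_free_list() is a driver's device_terminate_all
 * callback, which pairs it with vchan_get_all_descriptors() (virt-dma.h)
 * so the descriptors are freed outside the lock. "foo_terminate_all"
 * and "foo_stop_hw" are hypothetical names.
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		foo_stop_hw(vc);	quiesce the hardware first
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *		return 0;
 *	}
 */
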
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	tasklet_setup(&vc->task, vchan_complete);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);

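/*
 * Illustrative sketch (not part of this file): a driver's probe routine
 * registers each channel by pointing desc_free at its own wrapper
 * destructor and calling vchan_init(), which adds the channel to the
 * dma_device's list. "foo_chan" and "foo_desc_free" are hypothetical
 * names.
 *
 *	INIT_LIST_HEAD(&dmadev->channels);
 *	for (i = 0; i < nr_channels; i++) {
 *		struct foo_chan *c = &chans[i];
 *
 *		c->vc.desc_free = foo_desc_free;	frees struct foo_desc
 *		vchan_init(&c->vc, dmadev);
 *	}
 */
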
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");