/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;
	struct list_head desc_terminated;

	struct virt_dma_desc *cyclic;
};
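
/*
 * A minimal sketch of how a driver typically uses these structures (the
 * foo_* names are hypothetical, not part of this header): the driver embeds
 * struct virt_dma_desc and struct virt_dma_chan in its own descriptor and
 * channel types so that container_of() can recover the driver-private state.
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		dma_addr_t src, dst;
 *		size_t len;
 *	};
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		struct foo_desc *cur;		// descriptor in flight
 *		void __iomem *base;
 *	};
 *
 *	static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
 *	{
 *		return container_of(to_virt_chan(c), struct foo_chan, vc);
 *	}
 */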

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
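
/*
 * Illustrative sketch (using the hypothetical foo_* types above): a
 * device_prep_dma_memcpy() callback allocates a driver descriptor, fills in
 * the hardware-specific fields and hands the descriptor to vchan_tx_prep(),
 * which places it on the allocated list and returns the embedded
 * dma_async_tx_descriptor to the client.
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 */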

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
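
/*
 * Illustrative sketch: a device_issue_pending() callback takes vc.lock,
 * moves the submitted descriptors to the issued list and kicks the hardware
 * if it is idle. foo_start() is a hypothetical helper (see the sketch after
 * vchan_next_desc() below).
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (vchan_issue_pending(&fc->vc) && !fc->cur)
 *			foo_start(fc);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *	}
 */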

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
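
/*
 * Illustrative sketch: the interrupt handler completes the in-flight
 * descriptor under vc.lock. The descriptor must already have been removed
 * from the issued list (typically done when the transfer was started);
 * vchan_cookie_complete() then moves it to the completed list and schedules
 * the tasklet that invokes the client callback. fc->cur and foo_start() are
 * hypothetical, as in the sketches above.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (fc->cur) {
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *			foo_start(fc);	// start the next issued descriptor, if any
 *		}
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		return IRQ_HANDLED;
 *	}
 */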

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx)) {
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		list_add(&vd->node, &vc->desc_allocated);
		spin_unlock_irqrestore(&vc->lock, flags);
	} else {
		vc->desc_free(vd);
	}
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
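
/*
 * Illustrative sketch: for cyclic transfers the interrupt handler does not
 * complete the cookie on every interrupt; it reports each finished period
 * instead, and the tasklet invokes the client's periodic callback. The
 * is_cyclic flag on the hypothetical foo_desc marks cyclic descriptors.
 *
 *	spin_lock(&fc->vc.lock);
 *	if (fc->cur && fc->cur->is_cyclic)
 *		vchan_cyclic_callback(&fc->cur->vd);
 *	spin_unlock(&fc->vc.lock);
 */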

/**
 * vchan_terminate_vdesc - move a descriptor to the terminated list and clear any pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	list_add_tail(&vd->node, &vc->desc_terminated);

	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
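
/*
 * Illustrative sketch: a hardware start helper, called with vc.lock held,
 * peeks at the next issued descriptor, takes it off the list and programs
 * the controller. foo_hw_program() is hypothetical.
 *
 *	static void foo_start(struct foo_chan *fc)
 *	{
 *		struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);
 *
 *		if (!vd) {
 *			fc->cur = NULL;
 *			return;
 *		}
 *
 *		list_del(&vd->node);
 *		fc->cur = container_of(vd, struct foo_desc, vd);
 *		foo_hw_program(fc, fc->cur);
 *	}
 */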

/**
 * vchan_get_all_descriptors - obtain all descriptors queued on a channel
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all allocated, submitted, issued, completed and terminated
 * descriptors from the internal lists and places them on the provided
 * @head list.
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
					     struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
	list_splice_tail_init(&vc->desc_terminated, head);
}
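
/*
 * Illustrative sketch: device_terminate_all() stops the hardware, defers the
 * in-flight descriptor with vchan_terminate_vdesc() (it will be freed later
 * by vchan_synchronize()), gathers everything else with
 * vchan_get_all_descriptors() and frees that list outside the lock.
 * foo_hw_stop() is hypothetical.
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_hw_stop(fc);
 *		if (fc->cur) {
 *			vchan_terminate_vdesc(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *
 *		return 0;
 *	}
 */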

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running.
 * For proper operation the caller has to ensure that no new callbacks are
 * scheduled once this function has been invoked.
 * Also frees any terminated descriptors still on the channel to prevent a
 * memory leak.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	LIST_HEAD(head);
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);

	list_splice_tail_init(&vc->desc_terminated, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
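
/*
 * Illustrative sketch: the device_synchronize() callback usually just wraps
 * vchan_synchronize(), which also frees descriptors deferred via
 * vchan_terminate_vdesc() in a terminate_all implementation.
 *
 *	static void foo_synchronize(struct dma_chan *chan)
 *	{
 *		vchan_synchronize(&to_foo_chan(chan)->vc);
 *	}
 */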

#endif /* VIRT_DMA_H */