/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Qualcomm Technologies HIDMA data structures
 *
 * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 */

#ifndef QCOM_HIDMA_H
#define QCOM_HIDMA_H

#include <linux/kfifo.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>

#define HIDMA_TRE_SIZE		32 /* each TRE is 32 bytes */
#define HIDMA_TRE_CFG_IDX	0
#define HIDMA_TRE_LEN_IDX	1
#define HIDMA_TRE_SRC_LOW_IDX	2
#define HIDMA_TRE_SRC_HI_IDX	3
#define HIDMA_TRE_DEST_LOW_IDX	4
#define HIDMA_TRE_DEST_HI_IDX	5

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
	HIDMA_TRE_MEMSET = 4,
};

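/*
 * Illustrative sketch, not part of the driver: shows how the index macros
 * above would be used to populate the six architected 32-bit words of a
 * memcpy TRE in a local buffer. The bit layout of the config word is not
 * defined in this header, so writing the bare enum value is an assumption
 * made here purely for illustration. Assumes lower_32_bits()/
 * upper_32_bits() from <linux/kernel.h> are reachable.
 */
static inline void hidma_tre_fill_memcpy_sketch(u32 *tre, dma_addr_t src,
						dma_addr_t dest, u32 len)
{
	tre[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY; /* assumed encoding */
	tre[HIDMA_TRE_LEN_IDX] = len;
	tre[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
	tre[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
	tre[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
	tre[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
}
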
struct hidma_tre {
	atomic_t allocated;		/* if this channel is allocated	*/
	bool queued;			/* flag whether this is pending	*/
	u16 status;			/* status			*/
	u32 idx;			/* index of the tre		*/
	u32 dma_sig;			/* signature of the tre		*/
	const char *dev_name;		/* name of the device		*/
	void (*callback)(void *data);	/* requester callback		*/
	void *data;			/* data associated with this channel */
	struct hidma_lldev *lldev;	/* lldev device pointer		*/
	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
	u32 tre_index;			/* the offset where this was written */
	u32 int_flags;			/* interrupt flags		*/
	u8 err_info;			/* error record in this transfer */
	u8 err_code;			/* completion code		*/
};

struct hidma_lldev {
	bool msi_support;		/* flag indicating MSI support	*/
	bool initialized;		/* initialized flag		*/
	u8 trch_state;			/* trch_state of the device	*/
	u8 evch_state;			/* evch_state of the device	*/
	u8 chidx;			/* channel index in the core	*/
	u32 nr_tres;			/* max number of configs	*/
	spinlock_t lock;		/* reentrancy			*/
	struct hidma_tre *trepool;	/* pool of TREs for user configs */
	struct device *dev;		/* device			*/
	void __iomem *trca;		/* Transfer Channel address	*/
	void __iomem *evca;		/* Event Channel address	*/
	struct hidma_tre
		**pending_tre_list;	/* pointers to pending TREs	*/
	atomic_t pending_tre_count;	/* number of TREs pending	*/

	void *tre_ring;			/* TRE ring			*/
	dma_addr_t tre_dma;		/* TRE ring to be shared with HW */
	u32 tre_ring_size;		/* byte size of the ring	*/
	u32 tre_processed_off;		/* last processed TRE		*/

	void *evre_ring;		/* EVRE ring			*/
	dma_addr_t evre_dma;		/* EVRE ring to be shared with HW */
	u32 evre_ring_size;		/* byte size of the ring	*/
	u32 evre_processed_off;		/* last processed EVRE		*/

	u32 tre_write_offset;		/* TRE write location		*/
	struct tasklet_struct task;	/* task delivering notifications */
	DECLARE_KFIFO_PTR(handoff_fifo,
		struct hidma_tre *);	/* pending TREs FIFO		*/
};

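/*
 * Minimal sketch of the ring arithmetic implied by the fields above
 * (an assumption for illustration, not the driver's own code): the TRE
 * ring is a circular buffer of HIDMA_TRE_SIZE-byte entries, so advancing
 * tre_write_offset wraps at tre_ring_size.
 */
static inline u32 hidma_ring_advance_sketch(u32 offset, u32 ring_size)
{
	offset += HIDMA_TRE_SIZE;
	if (offset >= ring_size)	/* wrap to the start of the ring */
		offset = 0;
	return offset;
}
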
struct hidma_desc {
	struct dma_async_tx_descriptor desc;
	/* linked-list node for this channel */
	struct list_head node;
	u32 tre_ch;
};

struct hidma_chan {
	bool paused;
	bool allocated;
	char dbg_name[16];
	u32 dma_sig;
	dma_cookie_t last_success;

	struct hidma_dev *dmadev;

	/*
	 * Active descriptor on this channel.
	 * It is used by the DMA complete notification to
	 * locate the descriptor that initiated the transfer.
	 */
	struct hidma_desc *running;

	struct dma_chan chan;
	struct list_head free;
	struct list_head prepared;
	struct list_head queued;
	struct list_head active;
	struct list_head completed;

	/* Lock for this structure */
	spinlock_t lock;
};

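/*
 * Hypothetical helper illustrating the descriptor lifecycle suggested by
 * the lists above: descriptors are assumed to move free -> prepared ->
 * queued -> active -> completed, always under mchan->lock. Not part of
 * the driver; shown only to document the intended use of the lists.
 */
static inline void hidma_chan_queue_desc_sketch(struct hidma_chan *mchan,
						struct hidma_desc *mdesc)
{
	unsigned long irqflags;

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_move_tail(&mdesc->node, &mchan->queued); /* prepared -> queued */
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}
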
struct hidma_dev {
	int irq;
	int chidx;
	u32 nr_descriptors;
	int msi_virqbase;

	struct hidma_lldev *lldev;
	void __iomem *dev_trca;
	struct resource *trca_resource;
	void __iomem *dev_evca;
	struct resource *evca_resource;

	/* used to protect the pending channel list */
	spinlock_t lock;
	struct dma_device ddev;

	struct dentry *debugfs;

	/* sysfs entry for the channel id */
	struct device_attribute *chid_attrs;

	/* Task delivering issue_pending */
	struct tasklet_struct task;
};

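/*
 * Standard dmaengine idiom (a sketch, not taken from this header's
 * driver): recover the driver-private channel from the embedded
 * struct dma_chan handed in by the framework.
 */
static inline struct hidma_chan *to_hidma_chan_sketch(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}
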
int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
		     const char *dev_name,
		     void (*callback)(void *data), void *data, u32 *tre_ch);

void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
				  void __iomem *trca, void __iomem *evca,
				  u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
			       u8 err_code);
void hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_uninit(struct hidma_dev *dmadev);
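
/*
 * Sketch of a typical call flow assembled from the prototypes above;
 * error handling is abbreviated, and dev_id 0, flags 0 and passing the
 * TRE type as txntype are assumptions made for illustration: reserve a
 * TRE, program the transfer, queue it, then kick the channel.
 */
static inline int hidma_ll_memcpy_sketch(struct hidma_lldev *llhndl,
					 dma_addr_t src, dma_addr_t dest,
					 u32 len, void (*cb)(void *data),
					 void *data)
{
	u32 tre_ch;
	int rc;

	rc = hidma_ll_request(llhndl, 0, "sketch", cb, data, &tre_ch);
	if (rc)
		return rc;

	hidma_ll_set_transfer_params(llhndl, tre_ch, src, dest, len, 0,
				     HIDMA_TRE_MEMCPY);
	hidma_ll_queue_request(llhndl, tre_ch);
	hidma_ll_start(llhndl);
	return 0;
}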
#endif /* QCOM_HIDMA_H */