/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers, and are not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}
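
/*
 * Example (illustrative sketch only; the foo_* names are hypothetical):
 * a driver typically calls dma_cookie_init() once per channel while
 * wiring up its dma_device, before dma_async_device_register():
 *
 *	fchan->chan.device = &fdev->ddev;
 *	dma_cookie_init(&fchan->chan);
 *	list_add_tail(&fchan->chan.device_node, &fdev->ddev.channels);
 */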

/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	cookie = chan->cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}
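
/*
 * Example (illustrative sketch only; the foo_* names are hypothetical):
 * dma_cookie_assign() is normally called from a driver's tx_submit
 * hook, with the channel lock held to satisfy the locking note above:
 *
 *	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
 *	{
 *		struct foo_chan *fchan = to_foo_chan(tx->chan);
 *		unsigned long flags;
 *		dma_cookie_t cookie;
 *
 *		spin_lock_irqsave(&fchan->lock, flags);
 *		cookie = dma_cookie_assign(tx);
 *		list_add_tail(&to_foo_desc(tx)->node, &fchan->pending);
 *		spin_unlock_irqrestore(&fchan->lock, flags);
 *
 *		return cookie;
 *	}
 */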

/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channel's completed
 * cookie marker. Zero the descriptor's cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}
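
/*
 * Example (illustrative sketch only; the foo_* names are hypothetical):
 * a driver's interrupt handler or completion tasklet marks the finished
 * descriptor complete under the channel lock:
 *
 *	spin_lock_irqsave(&fchan->lock, flags);
 *	fdesc = list_first_entry(&fchan->active, struct foo_desc, node);
 *	dma_cookie_complete(&fdesc->tx);
 *	list_del(&fdesc->node);
 *	spin_unlock_irqrestore(&fchan->lock, flags);
 */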

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL. No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
						dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used, complete;

	used = chan->cookie;
	complete = chan->completed_cookie;
	barrier();
	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
		state->in_flight_bytes = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}
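
/*
 * Example (illustrative sketch only; the foo_* names are hypothetical):
 * a typical device_tx_status implementation defers to
 * dma_cookie_status() and only computes the residue, via
 * dma_set_residue() below, for transactions that are still in flight:
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *					     dma_cookie_t cookie,
 *					     struct dma_tx_state *state)
 *	{
 *		enum dma_status ret;
 *
 *		ret = dma_cookie_status(chan, cookie, state);
 *		if (ret == DMA_COMPLETE)
 *			return ret;
 *
 *		dma_set_residue(state, foo_get_residue(chan, cookie));
 *		return ret;
 *	}
 */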

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}

static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
					   u32 in_flight_bytes)
{
	if (state)
		state->in_flight_bytes = in_flight_bytes;
}

struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed-in cb struct with what's available in the passed-in
 * tx descriptor struct.
 * No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
			    struct dmaengine_desc_callback *cb)
{
	cb->callback = tx->callback;
	cb->callback_result = tx->callback_result;
	cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
			       const struct dmaengine_result *result)
{
	struct dmaengine_result dummy_result = {
		.result = DMA_TRANS_NOERROR,
		.residue = 0
	};

	if (cb->callback_result) {
		if (!result)
			result = &dummy_result;
		cb->callback_result(cb->callback_param, result);
	} else if (cb->callback) {
		cb->callback(cb->callback_param);
	}
}
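
/*
 * Example (illustrative sketch only; the foo_* names are hypothetical):
 * the usual pattern is to snapshot the callback under the channel lock,
 * then invoke it after the lock is dropped so the callback is free to
 * submit new descriptors:
 *
 *	struct dmaengine_desc_callback cb;
 *
 *	spin_lock_irqsave(&fchan->lock, flags);
 *	dmaengine_desc_get_callback(&fdesc->tx, &cb);
 *	dma_cookie_complete(&fdesc->tx);
 *	spin_unlock_irqrestore(&fchan->lock, flags);
 *
 *	dmaengine_desc_callback_invoke(&cb, NULL);
 */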

/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 *					then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
				   const struct dmaengine_result *result)
{
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(tx, &cb);
	dmaengine_desc_callback_invoke(&cb, result);
}

/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return true if at least one of the callbacks in @cb is set.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
	return cb->callback || cb->callback_result;
}
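
/*
 * Example (illustrative sketch only; the foo_* names are hypothetical):
 * dmaengine_desc_callback_valid() lets a completion path skip the
 * invocation entirely when the client registered no callback:
 *
 *	dmaengine_desc_get_callback(&fdesc->tx, &cb);
 *	if (dmaengine_desc_callback_valid(&cb))
 *		dmaengine_desc_callback_invoke(&cb, &fdesc->result);
 */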

struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return dma_dev->dbg_dev_root;
}
#else
struct dentry;
static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return NULL;
}
#endif /* CONFIG_DEBUG_FS */

#endif /* DMAENGINE_H */