// SPDX-License-Identifier: GPL-2.0-only
/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence-chain.h>

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);

/**
 * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
 * @chain: chain node to get the previous node from
 *
 * Use dma_fence_get_rcu_safe() to get a reference to the previous fence of the
 * chain node.
 */
static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
{
	struct dma_fence *prev;

	rcu_read_lock();
	prev = dma_fence_get_rcu_safe(&chain->prev);
	rcu_read_unlock();
	return prev;
}

/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node. Returns the next fence or NULL if we are at
 * the end of the chain. Garbage collects chain nodes which are already
 * signaled.
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *replacement, *tmp;

	chain = to_dma_fence_chain(fence);
	if (!chain) {
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_chain_get_prev(chain))) {

		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain) {
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;

			replacement = dma_fence_chain_get_prev(prev_chain);
		} else {
			if (!dma_fence_is_signaled(prev))
				break;

			replacement = NULL;
		}

		tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
			      prev, replacement);
		if (tmp == prev)
			dma_fence_put(tmp);
		else
			dma_fence_put(replacement);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
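
/*
 * Usage sketch (illustrative): a chain is consumed by taking a reference and
 * letting dma_fence_chain_walk() exchange it for the next node. The
 * dma_fence_chain_for_each() macro in <linux/dma-fence-chain.h> wraps exactly
 * this pattern; "head" here is a hypothetical fence the caller holds:
 *
 *	struct dma_fence *iter;
 *
 *	for (iter = dma_fence_get(head); iter;
 *	     iter = dma_fence_chain_walk(iter)) {
 *		// inspect iter; the walk drops its reference and returns
 *		// a referenced next node (or NULL at the end of the chain)
 *	}
 */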

/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number. If no sequence number is provided then this is a no-op.
 *
 * Returns -EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	if (!seqno)
		return 0;

	chain = to_dma_fence_chain(*pfence);
	if (!chain || chain->base.seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*pfence, &chain->base) {
		if ((*pfence)->context != chain->base.context ||
		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
			break;
	}
	dma_fence_put(&chain->base);

	return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
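
/*
 * Usage sketch (illustrative): resolving a timeline point, e.g. point 5.
 * "timeline_head" is a hypothetical fence the caller holds a reference to;
 * on success the reference in "fence" has been advanced to the node which
 * signals point 5 (or NULL if that point already signaled), on error it is
 * left untouched.
 *
 *	struct dma_fence *fence = dma_fence_get(timeline_head);
 *	int err;
 *
 *	err = dma_fence_chain_find_seqno(&fence, 5);
 *	if (err)
 *		return err;	// not a chain node, or point 5 not added yet
 */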

static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}

static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

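/*
 * Deferred work, queued from the fence callback: either re-arms the callback
 * on the next unsignaled fence in the chain or, if none is left, signals the
 * chain node itself.
 */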
static void dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain;

	chain = container_of(work, typeof(*chain), work);

	/* Try to rearm the callback */
	if (!dma_fence_chain_enable_signaling(&chain->base))
		/* Ok, we are done. No more unsignaled fences left */
		dma_fence_signal(&chain->base);
	dma_fence_put(&chain->base);
}

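/*
 * Fence callback: runs in the signaling path of the observed fence, so just
 * queue the irq_work and drop the reference taken when the callback was
 * installed.
 */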
static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain;

	chain = container_of(cb, typeof(*chain), cb);
	irq_work_queue(&chain->work);
	dma_fence_put(f);
}

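/*
 * &dma_fence_ops.enable_signaling implementation: install a callback on the
 * first unsignaled fence in the chain. Returns true if a callback was
 * installed, false if everything has already signaled.
 */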
static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *head = to_dma_fence_chain(fence);

	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
			dma_fence_put(fence);
			return true;
		}
		dma_fence_put(f);
	}
	dma_fence_put(&head->base);
	return false;
}

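/*
 * &dma_fence_ops.signaled implementation: the chain node only counts as
 * signaled once every fence up the chain has signaled.
 */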
static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_put(fence);
			return false;
		}
	}

	return true;
}

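/*
 * &dma_fence_ops.release implementation: splice out predecessors we hold the
 * last reference to, so a long chain is torn down iteratively rather than
 * through recursive release calls.
 */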
static void dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);
	struct dma_fence *prev;

	/* Manually unlink the chain as much as possible to avoid recursion
	 * and potential stack overflow.
	 */
	while ((prev = rcu_dereference_protected(chain->prev, true))) {
		struct dma_fence_chain *prev_chain;

		if (kref_read(&prev->refcount) > 1)
			break;

		prev_chain = to_dma_fence_chain(prev);
		if (!prev_chain)
			break;

		/* No need for atomic operations since we hold the last
		 * reference to prev_chain.
		 */
		chain->prev = prev_chain->prev;
		RCU_INIT_POINTER(prev_chain->prev, NULL);
		dma_fence_put(prev);
	}
	dma_fence_put(prev);

	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
};
EXPORT_SYMBOL(dma_fence_chain_ops);

/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->lock);
	rcu_assign_pointer(chain->prev, prev);
	chain->fence = fence;
	chain->prev_seqno = 0;
	init_irq_work(&chain->work, dma_fence_chain_irq_work);

	/* Try to reuse the context of the previous chain node. */
	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	} else {
		context = dma_fence_context_alloc(1);
		/* Make sure that we always have a valid sequence number. */
		if (prev_chain)
			seqno = max(prev->seqno, seqno);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);
}
EXPORT_SYMBOL(dma_fence_chain_init);
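
/*
 * Usage sketch (illustrative): appending a new point to a timeline. "prev"
 * is the old chain head and "fence" the payload for this point; both names
 * are hypothetical caller state. The node takes over the references passed
 * in, so the caller gets/transfers them explicitly:
 *
 *	struct dma_fence_chain *node = dma_fence_chain_alloc();
 *
 *	if (!node)
 *		return -ENOMEM;
 *	dma_fence_chain_init(node, dma_fence_get(prev),
 *			     dma_fence_get(fence), seqno);
 *	// &node->base is now the head of the timeline
 */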