/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences attached (read operations).
 * The RCU mechanism is used to protect read access to fences from
 * locked write-side updates.
 */
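
/*
 * A minimal usage sketch for the update side (hypothetical driver code:
 * the "bo" object embedding a struct dma_resv as bo->resv and the
 * "fence" are assumptions, not part of this file). The writer takes the
 * ww_mutex, reserves a slot and publishes the fence; readers only need
 * rcu_read_lock() or the *_rcu helpers below.
 *
 *	dma_resv_lock(bo->resv, NULL);
 *	if (!dma_resv_reserve_shared(bo->resv, 1))
 *		dma_resv_add_shared_fence(bo->resv, fence);
 *	dma_resv_unlock(bo->resv);
 */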

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and initialize shared_max from the
 * actual allocation size, so any slack ksize() reports is usable as
 * extra fence slots.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with obj->lock held.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);

	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		else
			max = max(old->shared_count + num_fences,
				  old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);
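
/*
 * A minimal sketch of the reserve/add pairing (hypothetical driver code;
 * "bo", "fences" and "num_fences" are assumptions): reserving slots for
 * all fences up front means the later dma_resv_add_shared_fence() calls
 * cannot fail.
 *
 *	int i, ret;
 *
 *	dma_resv_lock(bo->resv, NULL);
 *	ret = dma_resv_reserve_shared(bo->resv, num_fences);
 *	if (!ret)
 *		for (i = 0; i < num_fences; ++i)
 *			dma_resv_add_shared_fence(bo->resv, fences[i]);
 *	dma_resv_unlock(bo->resv);
 */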

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. obj->lock must be held, and
 * dma_resv_reserve_shared() must have been called first.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_get_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. The obj->lock must be held.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_get_excl(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
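
/*
 * A minimal sketch (hypothetical "bo" and "write_fence" names): publishing
 * an exclusive fence after a write, which also retires all shared fences
 * as described above.
 *
 *	dma_resv_lock(bo->resv, NULL);
 *	dma_resv_add_excl_fence(bo->resv, write_fence);
 *	dma_resv_unlock(bo->resv);
 */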

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst->lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_list *src_list, *dst_list;
	struct dma_fence *old, *new;
	unsigned i;

	dma_resv_assert_held(dst);

	rcu_read_lock();
	src_list = rcu_dereference(src->fence);

retry:
	if (src_list) {
		unsigned shared_count = src_list->shared_count;

		rcu_read_unlock();

		dst_list = dma_resv_list_alloc(shared_count);
		if (!dst_list)
			return -ENOMEM;

		rcu_read_lock();
		src_list = rcu_dereference(src->fence);
		if (!src_list || src_list->shared_count > shared_count) {
			kfree(dst_list);
			goto retry;
		}

		dst_list->shared_count = 0;
		for (i = 0; i < src_list->shared_count; ++i) {
			struct dma_fence *fence;

			fence = rcu_dereference(src_list->shared[i]);
			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &fence->flags))
				continue;

			if (!dma_fence_get_rcu(fence)) {
				dma_resv_list_free(dst_list);
				src_list = rcu_dereference(src->fence);
				goto retry;
			}

			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				continue;
			}

			rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
		}
	} else {
		dst_list = NULL;
	}

	new = dma_fence_get_rcu_safe(&src->fence_excl);
	rcu_read_unlock();

	src_list = dma_resv_get_list(dst);
	old = dma_resv_get_excl(dst);

	write_seqcount_begin(&dst->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(dst->fence_excl, new);
	RCU_INIT_POINTER(dst->fence, dst_list);
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(src_list);
	dma_fence_put(old);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);
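
/*
 * A minimal sketch (the "ghost" destination object during an eviction is
 * an assumption): only the destination needs to be locked, the source
 * side is read under RCU.
 *
 *	int ret;
 *
 *	dma_resv_lock(ghost->resv, NULL);
 *	ret = dma_resv_copy_fences(ghost->resv, bo->resv);
 *	dma_resv_unlock(ghost->resv);
 */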

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
 * @pshared_count: the number of shared fences returned
 * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
			    unsigned *pshared_count,
			    struct dma_fence ***pshared)
{
	struct dma_fence **shared = NULL;
	struct dma_fence *fence_excl;
	unsigned int shared_count;
	int ret = 1;

	do {
		struct dma_resv_list *fobj;
		unsigned int i, seq;
		size_t sz = 0;

		shared_count = i = 0;

		rcu_read_lock();
		seq = read_seqcount_begin(&obj->seq);

		fence_excl = rcu_dereference(obj->fence_excl);
		if (fence_excl && !dma_fence_get_rcu(fence_excl))
			goto unlock;

		fobj = rcu_dereference(obj->fence);
		if (fobj)
			sz += sizeof(*shared) * fobj->shared_max;

		if (!pfence_excl && fence_excl)
			sz += sizeof(*shared);

		if (sz) {
			struct dma_fence **nshared;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();

				dma_fence_put(fence_excl);
				fence_excl = NULL;

				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				break;
			}
			shared = nshared;
			shared_count = fobj ? fobj->shared_count : 0;
			for (i = 0; i < shared_count; ++i) {
				shared[i] = rcu_dereference(fobj->shared[i]);
				if (!dma_fence_get_rcu(shared[i]))
					break;
			}
		}

		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
			while (i--)
				dma_fence_put(shared[i]);
			dma_fence_put(fence_excl);
			goto unlock;
		}

		ret = 0;
unlock:
		rcu_read_unlock();
	} while (ret);

	if (pfence_excl)
		*pfence_excl = fence_excl;
	else if (fence_excl)
		shared[shared_count++] = fence_excl;

	if (!shared_count) {
		kfree(shared);
		shared = NULL;
	}

	*pshared_count = shared_count;
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
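
/*
 * A minimal sketch of taking a lockless snapshot (hypothetical "bo"): the
 * returned array and every fence in it are references the caller owns and
 * must drop again.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int i, count;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(bo->resv, &excl, &count, &shared);
 *	if (!ret) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(shared[i]);
 *		kfree(shared);
 *		dma_fence_put(excl);
 *	}
 */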

/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @write: if true, merge all fences, otherwise return just the exclusive fence
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, bool write,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;
	struct dma_fence **fences;
	unsigned count;
	int r;

	if (!write) {
		*fence = dma_resv_get_excl_rcu(obj);
		return 0;
	}

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;

	if (count == 0) {
		*fence = NULL;
		return 0;
	}

	if (count == 1) {
		*fence = fences[0];
		kfree(fences);
		return 0;
	}

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1),
				       1, false);
	if (!array) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}

	*fence = &array->base;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
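
/*
 * A minimal sketch (hypothetical "bo"): collapsing everything a later
 * write must wait for into one fence that can be handed on as a single
 * dependency.
 *
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = dma_resv_get_singleton(bo->resv, true, &fence);
 *	if (!ret && fence) {
 *		... use fence as a dependency ...
 *		dma_fence_put(fence);
 *	}
 */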

/**
 * dma_resv_wait_timeout_rcu - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
			       unsigned long timeout)
{
	struct dma_fence *fence;
	unsigned seq, shared_count;
	long ret = timeout ? timeout : 1;
	int i;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();
	i = -1;

	fence = rcu_dereference(obj->fence_excl);
	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		if (!dma_fence_get_rcu(fence))
			goto unlock_retry;

		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			fence = NULL;
		}

	} else {
		fence = NULL;
	}

	if (wait_all) {
		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; !fence && i < shared_count; ++i) {
			struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &lfence->flags))
				continue;

			if (!dma_fence_get_rcu(lfence))
				goto unlock_retry;

			if (dma_fence_is_signaled(lfence)) {
				dma_fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	rcu_read_unlock();
	if (fence) {
		if (read_seqcount_retry(&obj->seq, seq)) {
			dma_fence_put(fence);
			goto retry;
		}

		ret = dma_fence_wait_timeout(fence, intr, ret);
		dma_fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
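
/*
 * A minimal sketch (hypothetical "bo"): an interruptible wait on all
 * fences with a 10 second cap, distinguishing the three possible
 * outcomes.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout_rcu(bo->resv, true, true,
 *					msecs_to_jiffies(10000));
 *	if (ret < 0)
 *		return ret;		(interrupted)
 *	if (ret == 0)
 *		return -ETIMEDOUT;	(timed out)
 */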
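/*
 * Check one fence under RCU: returns 1 if signaled, 0 if not, and -1 if
 * the fence was about to be freed and the caller must re-read it.
 */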
static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
{
	struct dma_fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = dma_fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!dma_fence_is_signaled(fence);
		dma_fence_put(fence);
	}
	return ret;
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
{
	unsigned seq, shared_count;
	int ret;

	rcu_read_lock();
retry:
	ret = true;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);

	if (test_all) {
		unsigned i;

		struct dma_resv_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			ret = dma_resv_test_signaled_single(fence);
			if (ret < 0)
				goto retry;
			else if (!ret)
				break;
		}

		if (read_seqcount_retry(&obj->seq, seq))
			goto retry;
	}

	if (!shared_count) {
		struct dma_fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (fence_excl) {
			ret = dma_resv_test_signaled_single(fence_excl);
			if (ret < 0)
				goto retry;

			if (read_seqcount_retry(&obj->seq, seq))
				goto retry;
		}
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
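
/*
 * A minimal sketch (hypothetical "bo"): a non-blocking busy check, e.g.
 * for implementing a poll() hook or a GEM busy ioctl.
 *
 *	if (!dma_resv_test_signaled_rcu(bo->resv, true))
 *		return -EBUSY;
 */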