Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

#define PENDING_ERROR 1

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
					      int error)
{
	/*
	 * Propagate the first error reported by any of our fences, but only
	 * before we ourselves are signaled.
	 */
	if (error)
		cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
	/* Clear the error flag if not actually set. */
	cmpxchg(&array->base.error, PENDING_ERROR, 0);
}
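
/*
 * The two helpers above form a one-shot state machine: base.error is primed
 * to the PENDING_ERROR sentinel in dma_fence_array_create(), the first fence
 * that fails swaps the sentinel for its error code, and signalling a clean
 * array swaps it for 0. Either cmpxchg() transition consumes the sentinel,
 * so later calls are no-ops and only the first reported error is kept.
 */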

static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_array_clear_pending_error(array);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}

static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	dma_fence_array_set_pending_error(array, f->error);

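	/*
	 * Each callback drops the reference taken for it in
	 * dma_fence_array_enable_signaling(); the last one instead hands its
	 * reference to the irq_work, which puts it after signalling.
	 */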
	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				dma_fence_array_clear_pending_error(array);
				return false;
			}
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	if (atomic_read(&array->num_pending) > 0)
		return false;

	dma_fence_array_clear_pending_error(array);
	return true;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_put(array->fences[i]);

	kfree(array->fences);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);
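
/*
 * Exporting the ops table lets other code recognize an array fence by
 * comparing fence->ops against &dma_fence_array_ops; dma_fence_is_array()
 * and to_dma_fence_array() in <linux/dma-fence-array.h> are the usual
 * wrappers around that check.
 */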

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init().
 * In case of error it returns NULL.
 *
 * The caller should allocate the fences array with num_fences size
 * and fill it with the fences it wants to add to the object. Ownership of this
 * array is taken and dma_fence_put() is used on each fence on release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
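	/*
	 * With signal_on_any, a single completed fence is enough to drop
	 * num_pending to zero and signal the whole array.
	 */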
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	array->base.error = PENDING_ERROR;

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
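
/*
 * Illustrative usage sketch (not part of this file): combine two fences @a
 * and @b into one fence that signals once both have signalled. The calls are
 * the regular dma-fence APIs; the surrounding error handling and the caller
 * owning references on @a and @b are assumptions.
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *array;
 *
 *	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *	if (!fences)
 *		return -ENOMEM;
 *	fences[0] = dma_fence_get(a);
 *	fences[1] = dma_fence_get(b);
 *
 *	array = dma_fence_array_create(2, fences,
 *				       dma_fence_context_alloc(1), 1, false);
 *	if (!array) {
 *		dma_fence_put(a);
 *		dma_fence_put(b);
 *		kfree(fences);
 *		return -ENOMEM;
 *	}
 *
 * On success the array owns @fences and the two references; wait on
 * &array->base like any other fence, then drop the creation reference:
 *
 *	dma_fence_wait(&array->base, false);
 *	dma_fence_put(&array->base);
 */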

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:		[in]	fence or fence array
 * @context:		[in]	fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned i;

	if (!dma_fence_is_array(fence))
		return fence->context == context;

	for (i = 0; i < array->num_fences; i++) {
		if (array->fences[i]->context != context)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
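
/*
 * Illustrative usage sketch (not part of this file): a driver that executes
 * jobs in submission order on a single timeline can skip waiting on fences
 * from its own context, since those are ordered implicitly. Here @in_fence
 * and @ctx (the driver's own fence context) are assumed names.
 *
 *	if (!dma_fence_match_context(in_fence, ctx)) {
 *		ret = dma_fence_wait(in_fence, true);
 *		if (ret)
 *			return ret;
 *	}
 */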