Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 commits, 0 branches, 0 tags. Every line of the file below is from a single commit, 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).
/* SPDX-License-Identifier: MIT */

/*
 * Copyright © 2019 Intel Corporation
 */

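/*
 * Self-tests for the dma_fence primitive. A mock fence backed by a
 * SLAB_TYPESAFE_BY_RCU kmem_cache is used to exercise signaling,
 * callback addition and removal, status and error reporting, waits
 * with and without a timeout, the shared stub fence, and a two-thread
 * race between signaling and adding callbacks.
 */
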
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "selftest.h"

static struct kmem_cache *slab_fences;

static struct mock_fence {
	struct dma_fence base;
	struct spinlock lock;
} *to_mock_fence(struct dma_fence *f) {
	return container_of(f, struct mock_fence, base);
}

static const char *mock_name(struct dma_fence *f)
{
	return "mock";
}

static void mock_fence_release(struct dma_fence *f)
{
	kmem_cache_free(slab_fences, to_mock_fence(f));
}

struct wait_cb {
	struct dma_fence_cb cb;
	struct task_struct *task;
};

static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
}

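/*
 * Minimal .wait implementation for the mock fence: attach a wake-up
 * callback, then sleep until the fence is signaled, the timeout
 * expires, or (for interruptible waits) a signal is pending.
 */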
static long mock_wait(struct dma_fence *f, bool intr, long timeout)
{
	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct wait_cb cb = { .task = current };

	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
		return timeout;

	while (timeout) {
		set_current_state(state);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (signal_pending_state(state, current))
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	if (!dma_fence_remove_callback(f, &cb.cb))
		return timeout;

	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	return -ETIME;
}

static const struct dma_fence_ops mock_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.wait = mock_wait,
	.release = mock_fence_release,
};

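/*
 * Allocate a mock fence from the RCU-typesafe slab cache and
 * initialise it on context 0 with seqno 0.
 */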
static struct dma_fence *mock_fence(void)
{
	struct mock_fence *f;

	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
	if (!f)
		return NULL;

	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

	return &f->base;
}

static int sanitycheck(void *arg)
{
	struct dma_fence *f;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);
	dma_fence_put(f);

	return 0;
}

static int test_signaling(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_is_signaled(f)) {
		pr_err("Fence unexpectedly signaled on creation\n");
		goto err_free;
	}

	if (dma_fence_signal(f)) {
		pr_err("Fence reported being already signaled\n");
		goto err_free;
	}

	if (!dma_fence_is_signaled(f)) {
		pr_err("Fence not reporting signaled\n");
		goto err_free;
	}

	if (!dma_fence_signal(f)) {
		pr_err("Fence reported not being already signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

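/*
 * simple_callback() marks its container as seen via smp_store_mb();
 * the callback tests below poll the .seen flag to tell whether the
 * callback actually ran.
 */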
struct simple_cb {
	struct dma_fence_cb cb;
	bool seen;
};

static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}

static int test_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_late_add_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);

	if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Added callback, but fence was already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback called after failed attachment!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	if (!dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Failed to remove callback!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (cb.seen) {
		pr_err("Callback still signaled after removal!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_late_rm_callback(void *arg)
{
	struct simple_cb cb = {};
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
		pr_err("Failed to add callback, fence already signaled!\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!cb.seen) {
		pr_err("Callback failed!\n");
		goto err_free;
	}

	if (dma_fence_remove_callback(f, &cb.cb)) {
		pr_err("Callback removal succeeded after being executed!\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_status(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has signaled status on creation\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (!dma_fence_get_status(f)) {
		pr_err("Fence not reporting signaled status\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_error(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_set_error(f, -EIO);

	if (dma_fence_get_status(f)) {
		pr_err("Fence unexpectedly has error status before signal\n");
		goto err_free;
	}

	dma_fence_signal(f);
	if (dma_fence_get_status(f) != -EIO) {
		pr_err("Fence not reporting error status, got %d\n",
		       dma_fence_get_status(f));
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_put(f);
	return err;
}

static int test_wait(void *arg)
{
	struct dma_fence *f;
	int err = -EINVAL;

	f = mock_fence();
	if (!f)
		return -ENOMEM;

	if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	dma_fence_signal(f);

	if (dma_fence_wait_timeout(f, false, 0) != 0) {
		pr_err("Wait reported incomplete after being signaled\n");
		goto err_free;
	}

	err = 0;
err_free:
	dma_fence_signal(f);
	dma_fence_put(f);
	return err;
}

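/*
 * For test_wait_timeout() a timer signals the fence asynchronously,
 * so dma_fence_wait_timeout() is exercised against a fence that only
 * becomes signaled while the waiter is actually sleeping.
 */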
struct wait_timer {
	struct timer_list timer;
	struct dma_fence *f;
};

static void wait_timer(struct timer_list *timer)
{
	struct wait_timer *wt = from_timer(wt, timer, timer);

	dma_fence_signal(wt->f);
}

static int test_wait_timeout(void *arg)
{
	struct wait_timer wt;
	int err = -EINVAL;

	timer_setup_on_stack(&wt.timer, wait_timer, 0);

	wt.f = mock_fence();
	if (!wt.f)
		return -ENOMEM;

	if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
		pr_err("Wait reported complete before being signaled\n");
		goto err_free;
	}

	mod_timer(&wt.timer, jiffies + 1);

	if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
		if (timer_pending(&wt.timer)) {
			pr_notice("Timer did not fire within the jiffie!\n");
			err = 0; /* not our fault! */
		} else {
			pr_err("Wait reported incomplete after timeout\n");
		}
		goto err_free;
	}

	err = 0;
err_free:
	del_timer_sync(&wt.timer);
	destroy_timer_on_stack(&wt.timer);
	dma_fence_signal(wt.f);
	dma_fence_put(wt.f);
	return err;
}

static int test_stub(void *arg)
{
	struct dma_fence *f[64];
	int err = -EINVAL;
	int i;

	for (i = 0; i < ARRAY_SIZE(f); i++) {
		f[i] = dma_fence_get_stub();
		if (!dma_fence_is_signaled(f[i])) {
			pr_err("Obtained unsignaled stub fence!\n");
			goto err;
		}
	}

	err = 0;
err:
	while (i--)
		dma_fence_put(f[i]);
	return err;
}

/* Now off to the races! */

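/*
 * Race test: two kthreads each publish a mock fence through an
 * RCU-protected slot and grab the other thread's fence with
 * dma_fence_get_rcu_safe(). Each signals its own fence either before
 * or after attaching a callback to its partner's fence, and then
 * checks that the callback was eventually seen.
 */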
struct race_thread {
	struct dma_fence __rcu **fences;
	struct task_struct *task;
	bool before;
	int id;
};

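/*
 * Callbacks run under the fence lock; taking and releasing that lock
 * here ensures any callback still executing on another CPU has
 * completed before cb.seen is inspected.
 */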
static void __wait_for_callbacks(struct dma_fence *f)
{
	spin_lock_irq(f->lock);
	spin_unlock_irq(f->lock);
}

static int thread_signal_callback(void *arg)
{
	const struct race_thread *t = arg;
	unsigned long pass = 0;
	unsigned long miss = 0;
	int err = 0;

	while (!err && !kthread_should_stop()) {
		struct dma_fence *f1, *f2;
		struct simple_cb cb;

		f1 = mock_fence();
		if (!f1) {
			err = -ENOMEM;
			break;
		}

		rcu_assign_pointer(t->fences[t->id], f1);
		smp_wmb();

		rcu_read_lock();
		do {
			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
		} while (!f2 && !kthread_should_stop());
		rcu_read_unlock();

		if (t->before)
			dma_fence_signal(f1);

		smp_store_mb(cb.seen, false);
		if (!f2 || dma_fence_add_callback(f2, &cb.cb, simple_callback))
			miss++, cb.seen = true;

		if (!t->before)
			dma_fence_signal(f1);

		if (!cb.seen) {
			dma_fence_wait(f2, false);
			__wait_for_callbacks(f2);
		}

		if (!READ_ONCE(cb.seen)) {
			pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
			       t->id, pass, miss,
			       t->before ? "before" : "after",
			       dma_fence_is_signaled(f2) ? "yes" : "no");
			err = -EINVAL;
		}

		dma_fence_put(f2);

		rcu_assign_pointer(t->fences[t->id], NULL);
		smp_wmb();

		dma_fence_put(f1);

		pass++;
	}

	pr_info("%s[%d] completed %lu passes, %lu misses\n",
		__func__, t->id, pass, miss);
	return err;
}

static int race_signal_callback(void *arg)
{
	struct dma_fence __rcu *f[2] = {};
	int ret = 0;
	int pass;

	for (pass = 0; !ret && pass <= 1; pass++) {
		struct race_thread t[2];
		int i;

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			t[i].fences = f;
			t[i].id = i;
			t[i].before = pass;
			t[i].task = kthread_run(thread_signal_callback, &t[i],
						"dma-fence:%d", i);
			get_task_struct(t[i].task);
		}

		msleep(50);

		for (i = 0; i < ARRAY_SIZE(t); i++) {
			int err;

			err = kthread_stop(t[i].task);
			if (err && !ret)
				ret = err;

			put_task_struct(t[i].task);
		}
	}

	return ret;
}

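/*
 * Entry point for the selftest framework: create the mock-fence slab
 * cache, run each subtest in turn, then destroy the cache.
 */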
int dma_fence(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_signaling),
		SUBTEST(test_add_callback),
		SUBTEST(test_late_add_callback),
		SUBTEST(test_rm_callback),
		SUBTEST(test_late_rm_callback),
		SUBTEST(test_status),
		SUBTEST(test_error),
		SUBTEST(test_wait),
		SUBTEST(test_wait_timeout),
		SUBTEST(test_stub),
		SUBTEST(race_signal_callback),
	};
	int ret;

	pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

	slab_fences = KMEM_CACHE(mock_fence,
				 SLAB_TYPESAFE_BY_RCU |
				 SLAB_HWCACHE_ALIGN);
	if (!slab_fences)
		return -ENOMEM;

	ret = subtests(tests, NULL);

	kmem_cache_destroy(slab_fences);

	return ret;
}