/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/semaphore.h>
#include <linux/atomic.h>

/*
 * Reusable two-phase task barrier (rendezvous point) implementation for N
 * tasks. All tasks block at the entry turnstile until the last one arrives
 * and releases them; the exit turnstile mirrors this on the way out, so no
 * task can race ahead into the next cycle when the barrier is reused.
 * Based on "The Little Book of Semaphores" - https://greenteapress.com/wp/semaphores/
 */

#ifndef DRM_TASK_BARRIER_H_
#define DRM_TASK_BARRIER_H_

/*
 * Represents an instance of a task barrier.
 */
struct task_barrier {
	unsigned int n;				/* number of participating tasks */
	atomic_t count;				/* tasks currently inside the barrier */
	struct semaphore enter_turnstile;	/* gates entry into the rendezvous */
	struct semaphore exit_turnstile;	/* gates exit out of the rendezvous */
};

/* Release all n tasks waiting on the given turnstile. */
static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
						 unsigned int n)
{
	int i;

	for (i = 0; i < n; i++)
		up(turnstile);
}

/* Initialize an empty barrier; size it with task_barrier_add_task(). */
static inline void task_barrier_init(struct task_barrier *tb)
{
	tb->n = 0;
	atomic_set(&tb->count, 0);
	sema_init(&tb->enter_turnstile, 0);
	sema_init(&tb->exit_turnstile, 0);
}

/*
 * Add/remove a participating task. These plain updates are not synchronized
 * against barrier use, so they must only be done while no task is inside
 * task_barrier_enter()/task_barrier_exit(). A setup sketch follows below.
 */
static inline void task_barrier_add_task(struct task_barrier *tb)
{
	tb->n++;
}

static inline void task_barrier_rem_task(struct task_barrier *tb)
{
	tb->n--;
}
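
/*
 * Illustrative setup sketch, not part of this header: num_workers is a
 * hypothetical name. The barrier must be fully sized before any task
 * enters it.
 *
 *	struct task_barrier tb;
 *	int i;
 *
 *	task_barrier_init(&tb);
 *	for (i = 0; i < num_workers; i++)
 *		task_barrier_add_task(&tb);
 */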

/*
 * Lines up all the threads BEFORE the critical point.
 *
 * Once all threads have passed this point, the entry turnstile is back in
 * its locked state, so the barrier can be reused.
 */
static inline void task_barrier_enter(struct task_barrier *tb)
{
	if (atomic_inc_return(&tb->count) == tb->n)
		task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);

	down(&tb->enter_turnstile);
}
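
/*
 * Worked example of the entry turnstile (illustrative), with n = 2:
 * task A increments count 0 -> 1, sees 1 != n and blocks in down(); task B
 * increments count 1 -> 2, sees 2 == n and posts enter_turnstile twice.
 * A and B each consume exactly one post, so the semaphore ends back at 0
 * (locked) and the barrier is ready for its next cycle.
 */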

/*
 * Lines up all the threads AFTER the critical point.
 *
 * This prevents any one thread from racing ahead into the next cycle when
 * the barrier is used repeatedly: no thread may re-enter until every thread
 * has exited.
 */
static inline void task_barrier_exit(struct task_barrier *tb)
{
	if (atomic_dec_return(&tb->count) == 0)
		task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);

	down(&tb->exit_turnstile);
}

/* Convenience function for when nothing needs to be done between entry and exit. */
static inline void task_barrier_full(struct task_barrier *tb)
{
	task_barrier_enter(tb);
	task_barrier_exit(tb);
}
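
/*
 * Illustrative usage sketch, not part of this header: my_worker() and
 * do_critical_work() are hypothetical. Each registered task runs the loop
 * below; all of them rendezvous around the critical section on every
 * iteration, and the barrier is immediately reusable.
 *
 *	static int my_worker(void *data)
 *	{
 *		struct task_barrier *tb = data;
 *
 *		while (!kthread_should_stop()) {
 *			task_barrier_enter(tb);	// wait for all n tasks
 *			do_critical_work();	// all tasks run this together
 *			task_barrier_exit(tb);	// resync before the next cycle
 *		}
 *		return 0;
 *	}
 */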

#endif /* DRM_TASK_BARRIER_H_ */