/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_
#define DRM_SCHEDULER_SPSC_QUEUE_H_

#include <linux/atomic.h>
#include <linux/preempt.h>
/** SPSC (single producer, single consumer) lockless queue */

struct spsc_node {

	/* Pointer to the next queued node, NULL for the last one */
	struct spsc_node *next;
};

struct spsc_queue {

	/* Oldest queued node, consumed from the consumer side */
	struct spsc_node *head;

	/*
	 * Atomically stores a struct spsc_node **: the address of the last
	 * node's next pointer, or of head while the queue is empty.
	 */
	atomic_long_t tail;

	atomic_t job_count;
};

static inline void spsc_queue_init(struct spsc_queue *queue)
{
	queue->head = NULL;
	atomic_long_set(&queue->tail, (long)&queue->head);
	atomic_set(&queue->job_count, 0);
}

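/*
 * Return the current head without removing it. In a single-producer /
 * single-consumer setup this is only meaningful from the consumer side.
 */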
static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
{
	return queue->head;
}

static inline int spsc_queue_count(struct spsc_queue *queue)
{
	return atomic_read(&queue->job_count);
}

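/*
 * Append @node at the tail of @queue.
 *
 * Returns true if the queue was empty before the push, i.e. @node is now the
 * head and the consumer may need to be woken up. Preemption is disabled so
 * the window between publishing the new tail and linking the node stays
 * short, bounding how long a concurrent pop of the last element may spin.
 */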
static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
{
	struct spsc_node **tail;

	node->next = NULL;

	preempt_disable();

	tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
	WRITE_ONCE(*tail, node);
	atomic_inc(&queue->job_count);

	/*
	 * In case of the first element, make sure the new node is visible to
	 * the consumer thread before we ping the kernel thread that there is
	 * new work to do.
	 */
	smp_wmb();

	preempt_enable();

	return tail == &queue->head;
}

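/*
 * Detach and return the head of @queue, or NULL if the queue is empty.
 *
 * Only the single consumer may call this. When the last node is removed, the
 * tail is reset to point back at head; if a concurrent push has already moved
 * the tail on, we wait for that push to link its node before returning.
 */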
static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
{
	struct spsc_node *next, *node;

	/*
	 * Pairs with the smp_wmb() in spsc_queue_push(): make sure we read
	 * the freshly published head, not a stale value.
	 */
	smp_rmb();

	node = READ_ONCE(queue->head);

	if (!node)
		return NULL;

	next = READ_ONCE(node->next);
	WRITE_ONCE(queue->head, next);

	if (unlikely(!next)) {
		/* Slow path for the last element in the queue */

		if (atomic_long_cmpxchg(&queue->tail,
					(long)&node->next, (long)&queue->head) != (long)&node->next) {
			/* Updating the tail failed, wait for the new next to appear */
			do {
				smp_rmb();
			} while (unlikely(!(queue->head = READ_ONCE(node->next))));
		}
	}

	atomic_dec(&queue->job_count);
	return node;
}
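
/*
 * Example usage (an illustrative sketch only, not part of this header;
 * my_job, my_queue, my_producer, my_consumer and wake_up_consumer() are
 * hypothetical names). After spsc_queue_init(&my_queue), a node is embedded
 * in the job structure and the job is recovered with container_of() on the
 * consumer side:
 *
 *	struct my_job {
 *		struct spsc_node node;
 *		int payload;
 *	};
 *
 *	static struct spsc_queue my_queue;
 *
 *	static void my_producer(struct my_job *job)
 *	{
 *		if (spsc_queue_push(&my_queue, &job->node))
 *			wake_up_consumer();
 *	}
 *
 *	static struct my_job *my_consumer(void)
 *	{
 *		struct spsc_node *node = spsc_queue_pop(&my_queue);
 *
 *		return node ? container_of(node, struct my_job, node) : NULL;
 *	}
 */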

#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */