/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/qrwlock.h>
#include <asm/qspinlock.h>

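/*
 * arm64 selects ARCH_USE_QUEUED_SPINLOCKS and ARCH_USE_QUEUED_RWLOCKS,
 * so the generic queued-lock implementations included above provide
 * arch_spin_*() and arch_read_*()/arch_write_*() for this architecture.
 */
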
/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()
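
/*
 * Example (a sketch paraphrasing callers such as the scheduler; the lock
 * and variables are illustrative, not defined here): spin_lock() is only
 * an acquire on arm64, so a full barrier is needed when accesses before
 * the lock must also be ordered against accesses after it:
 *
 *	WRITE_ONCE(flag, 1);
 *	raw_spin_lock(&lock);
 *	smp_mb__after_spinlock();
 *	val = READ_ONCE(other_flag);	<- ordered against the flag store
 *	raw_spin_unlock(&lock);
 */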

/*
 * Changing this will break osq_lock() thanks to the call inside
 * smp_cond_load_relaxed(): on arm64 that primitive waits with WFE and
 * only re-checks its condition when an event wakes the waiter, so a
 * dynamic vcpu_is_preempted() would be re-evaluated late, if at all.
 *
 * See:
 * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
 */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
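
/*
 * The osq_lock() call in question (paraphrased from
 * kernel/locking/osq_lock.c): vcpu_is_preempted() sits inside the
 * smp_cond_load_relaxed() condition, which is why it must stay a cheap,
 * side-effect-free inline:
 *
 *	if (smp_cond_load_relaxed(&node->locked,
 *				  VAL || need_resched() ||
 *				  vcpu_is_preempted(node_cpu(node->prev))))
 *		return true;
 */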

#endif /* __ASM_SPINLOCK_H */