Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

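/*
 * PA-RISC spinlocks are built on the ldcw ("load and clear word")
 * instruction, which atomically reads the lock word and writes zero to
 * it.  A non-zero word (1) means the lock is free, zero means it is
 * held.  ldcw generally requires a 16-byte aligned word, which is why
 * __ldcw_align() is used to pick the aligned slot inside the lock.
 */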
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return READ_ONCE(*a) == 0;
}

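/*
 * Acquire: try to take the lock with ldcw; if the word was already zero
 * (held), spin on plain reads until the owner stores 1 back, then retry
 * the atomic load-and-clear.
 */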
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			continue;
}

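/*
 * Same as arch_spin_lock(), but if the caller's saved flags had
 * interrupts enabled (PSW_SM_I), briefly re-enable them while spinning
 * so pending interrupts can still be serviced.
 */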
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				local_irq_disable();
			}
}
#define arch_spin_lock_flags arch_spin_lock_flags

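/*
 * Release: store 1 back into the lock word to mark it free.  The store
 * is ordered, so accesses inside the critical section are visible
 * before the lock appears free to other CPUs.
 */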
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}

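/*
 * Single attempt: ldcw returns the previous value of the lock word, so
 * a non-zero result means the lock was free and is now ours.
 */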
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	return __ldcw(a) != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by readers.
 *
 * The rwlock state is kept in @counter and access to it is
 * serialized with @lock_mutex.
 */
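/*
 * @counter starts at __ARCH_RW_LOCK_UNLOCKED__; each active reader
 * decrements it, and a writer claims the lock by setting it to zero.
 */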

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A zero counter means a writer holds the lock exclusively;
	 * deny the reader.  Otherwise grant the lock to the first or a
	 * subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If readers hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer.  Otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

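/*
 * The blocking lock paths simply spin on the trylock variants,
 * relaxing the CPU between attempts.
 */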
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

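/*
 * Unlock paths: take @lock_mutex with interrupts disabled, then undo
 * the reader decrement (read) or restore the unlocked value (write).
 */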
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */