Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

OpenRISC <asm/cmpxchg.h> (git blame: every line last modified in commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

/*
 * 1, 2 and 4 byte cmpxchg and xchg implementations for OpenRISC.
 *
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 * Note:
 * The portable implementations of 1 and 2 byte xchg and cmpxchg using a 4
 * byte cmpxchg are sourced heavily from the sh and mips implementations.
 */

#ifndef __ASM_OPENRISC_CMPXCHG_H
#define __ASM_OPENRISC_CMPXCHG_H

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long cmpxchg_u32(volatile void *ptr,
		unsigned long old, unsigned long new)
{
	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%1)		\n"	/* load word atomic: read *ptr, set reservation */
		"	l.sfeq %0, %2		\n"	/* flag = (loaded value == expected old) */
		"	l.bnf 2f		\n"	/* values differ: skip the store */
		"	 l.nop			\n"	/* branch delay slot */
		"	l.swa 0(%1), %3		\n"	/* store word atomic: write new, flag = success */
		"	l.bnf 1b		\n"	/* reservation was lost: retry from the load */
		"	 l.nop			\n"	/* branch delay slot */
		"2:				\n"
		: "=&r"(old)
		: "r"(ptr), "r"(old), "r"(new)
		: "cc", "memory");

	return old;
}
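
For readers unfamiliar with the l.lwa/l.swa load-linked/store-conditional pair, the contract implemented above is a plain compare-and-swap: return the value observed at *ptr, and install new only if that value equalled old. Below is a minimal comparison sketch of the same contract written against GCC's generic __atomic builtins; the helper name cmpxchg_u32_sketch is invented and this code is not part of the header.

/* Illustrative only: the compare-and-swap contract of cmpxchg_u32(). */
static inline unsigned long cmpxchg_u32_sketch(volatile void *ptr,
		unsigned long old, unsigned long new)
{
	u32 expected = old;

	/*
	 * On success *ptr becomes new; in either case expected ends up
	 * holding the value that was actually observed at *ptr.
	 */
	__atomic_compare_exchange_n((volatile u32 *)ptr, &expected, (u32)new,
				    false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return expected;
}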

static inline unsigned long xchg_u32(volatile void *ptr,
		unsigned long val)
{
	__asm__ __volatile__(
		"1:	l.lwa %0, 0(%1)		\n"	/* load word atomic: read current value, set reservation */
		"	l.swa 0(%1), %2		\n"	/* store word atomic: write val, flag = success */
		"	l.bnf 1b		\n"	/* store failed: retry */
		"	 l.nop			\n"	/* branch delay slot */
		: "=&r"(val)
		: "r"(ptr), "r"(val)
		: "cc", "memory");

	return val;
}
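
xchg_u32() is the same retry loop without the compare: it keeps re-issuing the l.swa until the store succeeds and then returns the value that was previously in memory. Roughly the following, again as a hypothetical comparison sketch using a GCC builtin rather than the kernel's code:

/* Illustrative only: the unconditional-swap contract of xchg_u32(). */
static inline unsigned long xchg_u32_sketch(volatile void *ptr,
		unsigned long val)
{
	/* Atomically store val and return the previous contents of *ptr. */
	return __atomic_exchange_n((volatile u32 *)ptr, (u32)val,
				   __ATOMIC_SEQ_CST);
}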

static inline u32 cmpxchg_small(volatile void *ptr, u32 old, u32 new,
				int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 load32, old32, new32;
	u32 ret;

	load32 = READ_ONCE(*p);

	while (true) {
		ret = (load32 & bitmask) >> bitoff;
		if (old != ret)
			return ret;

		old32 = (load32 & ~bitmask) | (old << bitoff);
		new32 = (load32 & ~bitmask) | (new << bitoff);

		/* Do 32 bit cmpxchg */
		load32 = cmpxchg_u32(p, old32, new32);
		if (load32 == old32)
			return old;
	}
}
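
The mask arithmetic above is what the header's note means by building 1- and 2-byte operations out of a 4-byte cmpxchg: the small value is located inside its naturally aligned 32-bit word, only those bits are compared and replaced, and the surrounding bytes are carried through unchanged. A hypothetical stand-alone sketch of just that arithmetic (user-space C, not part of this header), for a 1-byte access one byte past a word boundary:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define BITS_PER_BYTE 8	/* same constant the kernel uses */

int main(void)
{
	uintptr_t addr = 0x1001;		/* byte address, one past a word boundary */
	int size = 1;				/* 1-byte access */
	int off = addr % sizeof(uint32_t);	/* 1: byte position within the 32-bit word */

	/* Big-endian layout (the branch OpenRISC builds compile): bits 23:16. */
	int bitoff_be = (sizeof(uint32_t) - size - off) * BITS_PER_BYTE;
	/* Little-endian layout: bits 15:8. */
	int bitoff_le = off * BITS_PER_BYTE;
	uint32_t mask_be = ((0x1u << size * BITS_PER_BYTE) - 1) << bitoff_be;
	uint32_t mask_le = ((0x1u << size * BITS_PER_BYTE) - 1) << bitoff_le;

	printf("big-endian:    bitoff=%d mask=0x%08" PRIx32 "\n", bitoff_be, mask_be);	/* 16, 0x00ff0000 */
	printf("little-endian: bitoff=%d mask=0x%08" PRIx32 "\n", bitoff_le, mask_le);	/* 8,  0x0000ff00 */
	return 0;
}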

/* xchg */

static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 oldv, newv;
	u32 ret;

	do {
		oldv = READ_ONCE(*p);
		ret = (oldv & bitmask) >> bitoff;
		newv = (oldv & ~bitmask) | (x << bitoff);
	} while (cmpxchg_u32(p, oldv, newv) != oldv);

	return ret;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for cmpxchg");

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
		return cmpxchg_small(ptr, old, new, size);
	case 4:
		return cmpxchg_u32(ptr, old, new);
	default:
		return __cmpxchg_called_with_bad_pointer();
	}
}

#define cmpxchg(ptr, o, n)						\
	({								\
		(__typeof__(*(ptr))) __cmpxchg((ptr),			\
					       (unsigned long)(o),	\
					       (unsigned long)(n),	\
					       sizeof(*(ptr)));		\
	})
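
The cmpxchg() macro is the interface the rest of the kernel uses; sizeof(*(ptr)) routes 1- and 2-byte operands through cmpxchg_small() and 4-byte operands through cmpxchg_u32(). A hypothetical usage sketch follows; the byte_trylock/byte_unlock helpers are invented for illustration, and real kernel code would normally use the spinlock or bitops APIs instead.

/* Hypothetical example: a byte-sized try-lock built on cmpxchg(). */
static inline int byte_trylock(u8 *lock)
{
	/* Succeeds only if we observed 0 and managed to install 1. */
	return cmpxchg(lock, 0, 1) == 0;
}

static inline void byte_unlock(u8 *lock)
{
	/* Illustration only: no memory-ordering guarantees are considered here. */
	WRITE_ONCE(*lock, 0);
}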

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalidly-sized xchg().
 */
extern unsigned long __xchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for xchg");

static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
		int size)
{
	switch (size) {
	case 1:
	case 2:
		return xchg_small(ptr, with, size);
	case 4:
		return xchg_u32(ptr, with);
	default:
		return __xchg_called_with_bad_pointer();
	}
}

#define xchg(ptr, with)						\
	({								\
		(__typeof__(*(ptr))) __xchg((ptr),			\
					    (unsigned long)(with),	\
					    sizeof(*(ptr)));		\
	})
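
xchg() follows the same size dispatch. It is typically used to claim or hand off a value in one atomic step, for example a fetch-and-clear of a pending mask. A hypothetical sketch (the take_pending_events helper and its argument are invented for illustration):

/* Hypothetical example: atomically consume a 16-bit pending-event mask. */
static inline u16 take_pending_events(u16 *pending)
{
	/* Returns whatever bits were set and leaves zero behind, atomically. */
	return xchg(pending, 0);
}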

#endif /* __ASM_OPENRISC_CMPXCHG_H */