Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/x86/include/asm/bitops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)			(U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define RLONG_ADDR(x)			 "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)			"+m" (*(volatile char *) (x))

#define ADDR				RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define CONST_MASK_ADDR(nr, addr)	WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)			(1 << ((nr) & 7))
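
/*
 * Worked example (editorial note, not in the original source): for nr == 10,
 * CONST_MASK_ADDR(10, addr) addresses byte (10 >> 3) == 1 of the bitmap, and
 * CONST_MASK(10) == (1 << (10 & 7)) == 0x04, i.e. bit 2 of that byte.  A
 * constant-nr LOCK ORB/ANDB/XORB on that single byte has the same effect as
 * the word-sized BTS/BTR/BTC but encodes smaller.
 */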

static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "orb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr))
			: "memory");
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}
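
/*
 * Usage sketch (illustrative only): callers normally reach this through the
 * generic set_bit() wrapper from <linux/bitops.h>; the bitmap below is a
 * hypothetical example, not part of this header.
 *
 *	unsigned long map[2] = { 0 };	// 128-bit bitmap
 *
 *	arch_set_bit(5, map);		// constant nr: compiles to LOCK ORB
 *	arch_set_bit(nr, map);		// variable nr: compiles to LOCK BTS
 */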

static __always_inline void
arch___set_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "andb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (~CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	barrier();
	arch_clear_bit(nr, addr);
}

static __always_inline void
arch___clear_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool
arch_clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1"
		CC_SET(s)
		: CC_OUT(s) (negative), WBYTE_ADDR(addr)
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}
#define arch_clear_bit_unlock_is_negative_byte                                 \
	arch_clear_bit_unlock_is_negative_byte
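
/*
 * Worked example (editorial note): with nr == 0 the mask is (char)~1 == 0xfe,
 * so the LOCK ANDB clears bit 0 of the addressed byte, and CC_SET(s) captures
 * the sign flag, i.e. whether bit 7 of that same byte is still set after the
 * clear.  This shape lets a caller drop a lock bit and test a waiters bit in
 * one atomic instruction, which is how the page-unlock path uses it.
 */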

static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	arch___clear_bit(nr, addr);
}

static __always_inline void
arch___change_bit(long nr, volatile unsigned long *addr)
{
	asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		asm volatile(LOCK_PREFIX "xorb %b1,%0"
			: CONST_MASK_ADDR(nr, addr)
			: "iq" (CONST_MASK(nr)));
	} else {
		asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
			: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
	}
}

static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}
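
/*
 * Usage sketch (illustrative only, hypothetical variable names): because the
 * old bit value is returned, this is the building block for try-lock style
 * code.
 *
 *	unsigned long state = 0;
 *
 *	if (!arch_test_and_set_bit(0, &state)) {
 *		// bit was clear: we atomically took ownership
 *	}
 */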

static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	return arch_test_and_set_bit(nr, addr);
}

static __always_inline bool
arch___test_and_set_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm(__ASM_SIZE(bts) " %2,%1"
	    CC_SET(c)
	    : CC_OUT(c) (oldbit)
	    : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/*
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool
arch___test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btr) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");
	return oldbit;
}

static __always_inline bool
arch___test_and_change_bit(long nr, volatile unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(btc) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : ADDR, "Ir" (nr) : "memory");

	return oldbit;
}

static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & (BITS_PER_LONG-1))) &
		(addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
	bool oldbit;

	asm volatile(__ASM_SIZE(bt) " %2,%1"
		     CC_SET(c)
		     : CC_OUT(c) (oldbit)
		     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

	return oldbit;
}

#define arch_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))		\
	 ? constant_test_bit((nr), (addr))	\
	 : variable_test_bit((nr), (addr)))
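
/*
 * Worked example (editorial note): __builtin_constant_p() picks the plain C
 * read when nr folds at compile time.  On 64-bit, constant_test_bit(70, addr)
 * reads addr[70 >> 6] == addr[1] and tests (1UL << (70 & 63)) == bit 6; a
 * runtime nr goes through the BT instruction in variable_test_bit() instead.
 */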

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
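
/*
 * Worked example (editorial note): __ffs(0x18) == 3, because 0x18 is 0b11000
 * and bit 3 is the lowest set bit.  The "rep; bsf" encoding executes as TZCNT
 * on CPUs with BMI1 and as plain BSF elsewhere; for nonzero input both give
 * the same answer.
 */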

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __always_inline unsigned long ffz(unsigned long word)
{
	asm("rep; bsf %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}
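
/*
 * Worked example (editorial note): ffz(0x0f) == 4; bits 0-3 are set, so the
 * lowest zero bit is bit 4.  The implementation is simply __ffs of the
 * complement: ~0x0f has bit 4 as its lowest set bit.
 */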

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	asm("bsr %1,%0"
	    : "=r" (word)
	    : "rm" (word));
	return word;
}
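
/*
 * Worked example (editorial note): __fls(0x18) == 4; the highest set bit of
 * 0b11000 is bit 4.  Note the 0-based result, unlike fls() below, which is
 * 1-based.
 */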

#undef ADDR

#ifdef __KERNEL__
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
static __always_inline int ffs(int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsfl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsfl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "r" (-1));
#else
	asm("bsfl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
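
/*
 * Worked example (editorial note): ffs(0) == 0, ffs(1) == 1, and
 * ffs(0x10) == 5, matching the 1-based libc convention; contrast with
 * __ffs(0x10) == 4 above.  The trailing r + 1 converts BSF's 0-based index
 * (or the -1 left in place on zero input) into this convention.
 */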

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
	int r;

#ifdef CONFIG_X86_64
	/*
	 * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before, except that the
	 * top 32 bits will be cleared.
	 *
	 * We cannot do this on 32 bits because at the very least some
	 * 486 CPUs did not behave this way.
	 */
	asm("bsrl %1,%0"
	    : "=r" (r)
	    : "rm" (x), "0" (-1));
#elif defined(CONFIG_X86_CMOV)
	asm("bsrl %1,%0\n\t"
	    "cmovzl %2,%0"
	    : "=&r" (r) : "rm" (x), "rm" (-1));
#else
	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n"
	    "1:" : "=r" (r) : "rm" (x));
#endif
	return r + 1;
}
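
/*
 * Worked example (editorial note): fls(0) == 0, fls(1) == 1, and
 * fls(0x80000000) == 32: BSR yields the 0-based index 31 of the most
 * significant set bit and the trailing r + 1 rebases it to the 1-based
 * convention.
 */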

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
	int bitpos = -1;
	/*
	 * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
	 * dest reg is undefined if x==0, but their CPU architect says its
	 * value is written to set it to the same as before.
	 */
	asm("bsrq %1,%q0"
	    : "+r" (bitpos)
	    : "rm" (x));
	return bitpos + 1;
}
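
/*
 * Worked example (editorial note): fls64(0) == 0, because BSRQ leaves the
 * preloaded -1 in bitpos on zero input (per the comment above), and
 * fls64(1ULL << 63) == 64, since bitpos becomes 63 and the return adds 1.
 */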
#else
#include <asm-generic/bitops/fls64.h>
#endif

#include <asm-generic/bitops/find.h>

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */