Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_

#include <asm/types.h>

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

/**
 * __clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}
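
/*
 * Usage sketch (illustrative only, assuming the bitmap is private to the
 * caller or already serialised by a lock, which is the intended setting
 * for the non-atomic __ variants):
 *
 *	unsigned long map[2] = { 0 };
 *
 *	__set_bit(0, map);		// map[0] |= 1UL << 0
 *	__set_bit(BITS_PER_LONG, map);	// first bit of map[1]
 *	__clear_bit(0, map);		// map[0] back to zero
 */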

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}
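
/*
 * Usage sketch (illustrative only): toggling the same bit twice with
 * __change_bit() restores the original contents:
 *
 *	unsigned long word = 0;
 *
 *	__change_bit(5, &word);		// word == 0x20 (1UL << 5)
 *	__change_bit(5, &word);		// word == 0 again
 */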

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
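
/*
 * Usage sketch (illustrative only): the returned old value enables
 * "claim if free" logic, but only when the bitmap is private to the
 * caller or protected by a lock, since the read-modify-write is not
 * atomic:
 *
 *	unsigned long used = 0;
 *	int old = __test_and_set_bit(7, &used);
 *
 *	// old == 0: bit 7 was clear and is now set (claimed by us)
 *	// old == 1: bit 7 was already set before the call
 */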

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

/**
 * __test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
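
/*
 * Index arithmetic sketch (illustrative only, assuming BITS_PER_LONG == 64):
 * BIT_WORD() selects the containing word and the shift keeps only the
 * in-word offset, so
 *
 *	test_bit(70, addr)
 *
 * reads addr[BIT_WORD(70)] == addr[1] and returns bit 70 & 63 == 6 of
 * that word.
 */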

#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */