/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_BITOPS_OP32_H
#define __ASM_SH_BITOPS_OP32_H

/*
 * The bit modifying instructions on SH-2A are only capable of working
 * with a 3-bit immediate, which signifies the shift position for the bit
 * being worked on.
 */
#if defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
#define BYTE_NUMBER(nr)		(((nr) ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)		(((nr) ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE)
#else
#define BYTE_NUMBER(nr)		((nr) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)		((nr) % BITS_PER_BYTE)
#endif

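/*
 * Worked example (illustrative only): with the little-endian definitions
 * above, BYTE_NUMBER(13) = 13 / 8 = 1 and BYTE_OFFSET(13) = 13 % 8 = 5,
 * i.e. bit 13 of the long lives in byte 1 at bit position 5.  The offset
 * is always in the range 0..7, so it fits the 3-bit immediate used by
 * bset.b, bclr.b and bxor.b below.
 */

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This operation is non-atomic and can be reordered.  When @nr is a
 * compile-time constant, a single bset.b instruction is emitted;
 * otherwise a plain read-modify-write of the containing long is used.
 */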
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		__asm__ __volatile__ (
			"bset.b %1, @(%O2,%0)		! __set_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p |= mask;
	}
}

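/**
 * __clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is non-atomic and can be reordered.  When @nr is a
 * compile-time constant, a single bclr.b instruction is emitted;
 * otherwise a plain read-modify-write of the containing long is used.
 */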
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		__asm__ __volatile__ (
			"bclr.b %1, @(%O2,%0)		! __clear_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)),
			  "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p &= ~mask;
	}
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		__asm__ __volatile__ (
			"bxor.b %1, @(%O2,%0)		! __change_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)),
			  "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p ^= mask;
	}
}

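/*
 * Example (illustrative only): for an unsigned long flags,
 * __change_bit(5, &flags) toggles bit 5, which in the non-constant path
 * is flags ^= BIT_MASK(5), i.e. flags ^= 0x20UL; with a constant @nr the
 * bxor.b form above is used instead of the read-modify-write sequence.
 */
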
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

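/*
 * Example (illustrative only): with an unsigned long flags == 0x01UL,
 * __test_and_set_bit(0, &flags) returns 1 and leaves flags unchanged,
 * while __test_and_set_bit(5, &flags) returns 0 and sets flags to 0x21UL.
 */
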
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

/**
 * __test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(int nr,
					volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

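/*
 * Example (illustrative only, assuming 32-bit longs): test_bit(37, addr)
 * reads addr[1] and returns bit 5 of that word, i.e. (addr[1] >> 5) & 1.
 */
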
#endif /* __ASM_SH_BITOPS_OP32_H */