/*
 * Atomic xchg and cmpxchg operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CMPXCHG_H
#define _XTENSA_CMPXCHG_H

#ifndef __ASSEMBLY__

#include <linux/bits.h>
#include <linux/stringify.h>

/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
#if XCHAL_HAVE_EXCLUSIVE
	unsigned long tmp, result;

	__asm__ __volatile__(
			"1:     l32ex   %[result], %[addr]\n"
			"       bne     %[result], %[cmp], 2f\n"
			"       mov     %[tmp], %[new]\n"
			"       s32ex   %[tmp], %[addr]\n"
			"       getex   %[tmp]\n"
			"       beqz    %[tmp], 1b\n"
			"2:\n"
			: [result] "=&a" (result), [tmp] "=&a" (tmp)
			: [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
			: "memory"
			);

	return result;
#elif XCHAL_HAVE_S32C1I
	__asm__ __volatile__(
			"       wsr     %[cmp], scompare1\n"
			"       s32c1i  %[new], %[mem]\n"
			: [new] "+a" (new), [mem] "+m" (*p)
			: [cmp] "a" (old)
			: "memory"
			);

	return new;
#else
	__asm__ __volatile__(
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
			"       l32i    %[old], %[mem]\n"
			"       bne     %[old], %[cmp], 1f\n"
			"       s32i    %[new], %[mem]\n"
			"1:\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: [old] "=&a" (old), [mem] "+m" (*p)
			: [cmp] "a" (old), [new] "r" (new)
			: "a15", "memory");
	return old;
#endif
}
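/*
 * All three variants above implement the same contract: the value that
 * was in *p before the operation is returned, and *p is set to 'new'
 * only if it previously held 'old'. An illustrative (not normative)
 * success check:
 *
 *	if (__cmpxchg_u32(p, old, new) == old)
 *		;	/- the store happened -/
 */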
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */

extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:  return __cmpxchg_u32(ptr, old, new);
	default: __cmpxchg_called_with_bad_pointer();
		 return old;
	}
}

#define cmpxchg(ptr,o,n)						      \
({ __typeof__(*(ptr)) _o_ = (o);					      \
   __typeof__(*(ptr)) _n_ = (n);					      \
   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		      \
				  (unsigned long)_n_, sizeof (*(ptr)));	      \
})
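/*
 * Usage sketch (illustrative only; 'counter' is a hypothetical plain
 * int): the classic compare-and-swap retry loop for a lock-free
 * read-modify-write built on cmpxchg().
 *
 *	int old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */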

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
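/*
 * Illustrative sketch only: cmpxchg_local() is atomic with respect to
 * code running on the same CPU (e.g. a statistic updated from both
 * task and interrupt context), but gives no atomicity or ordering
 * against other CPUs. 'stat' and 'flag' here are hypothetical:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = READ_ONCE(stat);
 *		new = old | flag;
 *	} while (cmpxchg_local(&stat, old, new) != old);
 */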

/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#if XCHAL_HAVE_EXCLUSIVE
	unsigned long tmp, result;

	__asm__ __volatile__(
			"1:     l32ex   %[result], %[addr]\n"
			"       mov     %[tmp], %[val]\n"
			"       s32ex   %[tmp], %[addr]\n"
			"       getex   %[tmp]\n"
			"       beqz    %[tmp], 1b\n"
			: [result] "=&a" (result), [tmp] "=&a" (tmp)
			: [val] "a" (val), [addr] "a" (m)
			: "memory"
			);

	return result;
#elif XCHAL_HAVE_S32C1I
	unsigned long tmp, result;
	__asm__ __volatile__(
			"1:     l32i    %[tmp], %[mem]\n"
			"       mov     %[result], %[val]\n"
			"       wsr     %[tmp], scompare1\n"
			"       s32c1i  %[result], %[mem]\n"
			"       bne     %[result], %[tmp], 1b\n"
			: [result] "=&a" (result), [tmp] "=&a" (tmp),
			  [mem] "+m" (*m)
			: [val] "a" (val)
			: "memory"
			);
	return result;
#else
	unsigned long tmp;
	__asm__ __volatile__(
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
			"       l32i    %[tmp], %[mem]\n"
			"       s32i    %[val], %[mem]\n"
			"       wsr     a15, ps\n"
			"       rsync\n"
			: [tmp] "=&a" (tmp), [mem] "+m" (*m)
			: [val] "a" (val)
			: "a15", "memory");
	return tmp;
#endif
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
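/*
 * Illustrative only: xchg() unconditionally stores the new value and
 * returns what was there before, which is the usual way to hand off a
 * value or claim a flag exactly once. 'pending' is hypothetical:
 *
 *	if (xchg(&pending, 1) == 0)
 *		;	/- this caller was the first to set it -/
 */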

static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
{
	int off = (unsigned long)ptr % sizeof(u32);
	volatile u32 *p = ptr - off;
#ifdef __BIG_ENDIAN
	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
#else
	int bitoff = off * BITS_PER_BYTE;
#endif
	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
	u32 oldv, newv;
	u32 ret;

	do {
		oldv = READ_ONCE(*p);
		ret = (oldv & bitmask) >> bitoff;
		newv = (oldv & ~bitmask) | (x << bitoff);
	} while (__cmpxchg_u32(p, oldv, newv) != oldv);

	return ret;
}
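/*
 * Worked example of the mask arithmetic above (little-endian, a u16 at
 * byte offset 2 within its aligned 32-bit word):
 *
 *	off     = 2
 *	bitoff  = 2 * 8 = 16
 *	bitmask = ((1 << 16) - 1) << 16 = 0xffff0000
 *
 * The loop then splices the new halfword into bits 31..16 of the
 * containing word and retries until the word-sized cmpxchg observes no
 * concurrent modification of either half.
 */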

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		return xchg_small(ptr, x, 1);
	case 2:
		return xchg_small(ptr, x, 2);
	case 4:
		return xchg_u32(ptr, x);
	default:
		__xchg_called_with_bad_pointer();
		return x;
	}
}
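/*
 * Sketch of how the switch above folds away (illustrative, assuming a
 * reasonably optimizing compiler, per the comment before __xchg()):
 * sizeof(*(ptr)) is a compile-time constant, so
 *
 *	u16 v = 5;
 *	u16 prev = xchg(&v, 7);	/- resolves to xchg_small(&v, 7, 2) -/
 *
 * while an unsupported size leaves an unresolved call to
 * __xchg_called_with_bad_pointer() and the build fails at link time.
 */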

#endif /* __ASSEMBLY__ */

#endif /* _XTENSA_CMPXCHG_H */