/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

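/*
 * LL/SC based compare-and-exchange: atomically replace *ptr with @new only
 * if it currently holds @expected, and return the value that was actually
 * read (callers detect success by comparing the return value to @expected).
 */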
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	/*
	 * Explicit full memory barrier needed before/after as
	 * LLOCK/SCOND themselves don't provide any such semantics
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
	  "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
	: "cc", "memory");	/* so that gcc knows memory is being written here */

	smp_mb();

	return prev;
}

#else /* !CONFIG_ARC_HAS_LLSC */

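/*
 * Without LLOCK/SCOND, emulate compare-and-exchange under the global
 * atomic_ops_lock spinlock (which reduces to IRQ save/restore on UP).
 */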
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	unsigned long prev;
	volatile unsigned long *p = ptr;

	/*
	 * spin lock/unlock provide the needed smp_mb() before/after
	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);
	return prev;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ({				\
	(typeof(*(ptr)))__cmpxchg((ptr),		\
				  (unsigned long)(o),	\
				  (unsigned long)(n));	\
})

/*
 * atomic_cmpxchg() is the same as cmpxchg():
 * LLSC: only the data type differs, the semantics are exactly the same
 * !LLSC: cmpxchg() has to use an external lock, atomic_ops_lock, to guarantee
 *        the semantics, and that lock also happens to be used by atomic_*()
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
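
/*
 * Illustrative sketch only (not part of this header): the usual lock-free
 * retry loop built on cmpxchg(). 'shared' and the increment are made up for
 * the example; the pattern is retry-until-the-old-value-was-unchanged.
 *
 *	unsigned long old, cur = shared;
 *
 *	do {
 *		old = cur;
 *		cur = cmpxchg(&shared, old, old + 1);
 *	} while (cur != old);
 */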

/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		smp_mb();

		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		smp_mb();

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * xchg() maps directly to the ARC EX instruction, which guarantees atomicity.
 * However, in the !LLSC config it also needs to take the @atomic_ops_lock
 * spinlock, for a subtle reason:
 * - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a lot
 *   of kernel code which calls xchg()/cmpxchg() on the same data (see llist.h)
 *   Hence xchg() needs to follow the same locking rules.
 *
 * Technically the lock is also needed for UP (boils down to irq save/restore)
 * but we can cheat a bit since cmpxchg() taking atomic_ops_lock() causes irqs
 * to be disabled, so it can't possibly be interrupted/preempted/clobbered by
 * xchg().
 * The other way around, xchg() is a single instruction anyway, so it can't be
 * interrupted as such.
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif
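
/*
 * Illustrative sketch only (not part of this header): xchg() is typically
 * used to atomically publish a new value while retrieving the previous one.
 * 'pending' and process() are made up for the example.
 *
 *	unsigned long prev = xchg(&pending, 0);
 *
 *	if (prev)
 *		process(prev);		// hypothetical consumer
 */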

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock is not relevant at all (even on SMP, since LLSC
 *         is natively "SMP safe", no serialization is required).
 *   UP  : other atomics disable IRQs, so there is no way a different context's
 *         atomic_xchg() could clobber them. atomic_xchg() itself would be
 *         1 insn, so it can't be clobbered by others. Thus no serialization
 *         is required when atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
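
/*
 * Illustrative sketch only (not part of this header): atomic_xchg() on an
 * atomic_t, e.g. handing off whatever count has accumulated so far.
 * 'pending_events' is made up for the example.
 *
 *	static atomic_t pending_events = ATOMIC_INIT(0);
 *
 *	int nr = atomic_xchg(&pending_events, 0);
 */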

#endif /* __ASM_ARC_CMPXCHG_H */