// SPDX-License-Identifier: GPL-2.0
/*
 * bitops.c: atomic operations which got too long to be inlined all over
 * the place.
 *
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Grant Grundler (grundler@cup.hp.com)
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

#ifdef CONFIG_SMP
arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
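
/*
 * On SMP builds, every helper in this file serializes through the hashed
 * locks above via _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore().
 * As a rough sketch (the real macros live in <asm/atomic.h> and may differ
 * in detail), the pointer being operated on is hashed down to one of the
 * ATOMIC_HASH_SIZE locks:
 *
 *	#define ATOMIC_HASH(a) \
 *		(&__atomic_hash[(((unsigned long)(a)) / L1_CACHE_BYTES) & \
 *				(ATOMIC_HASH_SIZE - 1)])
 *
 *	#define _atomic_spin_lock_irqsave(l, f) do {	\
 *		arch_spinlock_t *s = ATOMIC_HASH(l);	\
 *		local_irq_save(f);			\
 *		arch_spin_lock(s);			\
 *	} while (0)
 *
 * Two operations on the same word therefore always contend on the same
 * lock, while operations on unrelated words usually take different locks.
 */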

#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, volatile unsigned long *ptr)
{
	unsigned long temp, flags;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = *ptr;
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return temp;
}
#endif

unsigned long __xchg32(int x, volatile int *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}

unsigned long __xchg8(char x, volatile char *ptr)
{
	unsigned long flags;
	long temp;

	_atomic_spin_lock_irqsave(ptr, flags);
	temp = (long) *ptr;	/* XXX - sign extension wanted? */
	*ptr = x;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)temp;
}
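
/*
 * These helpers are not normally called directly; the arch's xchg()
 * wrapper is expected to dispatch on sizeof(*ptr) and cast the result
 * back to the caller's type.  A hedged sketch of such a caller
 * (set_state() is illustrative only, not an existing API):
 *
 *	static int set_state(volatile int *state, int new_state)
 *	{
 *		return (int)__xchg32(new_state, state);	// returns old value
 *	}
 */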

u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
{
	unsigned long flags;
	u64 prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}

unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return (unsigned long)prev;
}

u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
{
	unsigned long flags;
	u8 prev;

	_atomic_spin_lock_irqsave(ptr, flags);
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}
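
/*
 * Each __cmpxchg_* helper returns the value found at *ptr, so a caller
 * can tell whether its update won by comparing the return value against
 * 'old'.  A hedged sketch of a typical retry loop built on
 * __cmpxchg_u32() (atomic_or_u32() is illustrative, not a kernel API):
 *
 *	static void atomic_or_u32(volatile unsigned int *p, unsigned int mask)
 *	{
 *		unsigned int old, new;
 *
 *		do {
 *			old = *p;
 *			new = old | mask;
 *		} while (__cmpxchg_u32(p, old, new) != old);
 *	}
 */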