// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
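/*
 * Concretely (an illustrative scenario, not from the original comment):
 * the caller typically holds the last reference to an object that is
 * still reachable through a data structure protected by "lock", e.g. a
 * list or a hash table.  With the non-atomic version above, another CPU
 * could look the object up under the lock and take a new reference in
 * the window between the count hitting zero and spin_lock() being
 * acquired, and the first CPU would then tear down an object that is
 * live again.  Taking the lock before the count can reach zero closes
 * that window.
 */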
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);
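/*
 * Minimal usage sketch (hypothetical caller, not part of this file).
 * Callers normally use the atomic_dec_and_lock() wrapper from
 * <linux/spinlock.h>; the classic pattern is dropping what may be the
 * last reference to an object that is still reachable via a list
 * protected by the same lock:
 *
 *	void obj_put(struct obj *obj)
 *	{
 *		if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *			list_del(&obj->node);
 *			spin_unlock(&obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */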

int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
				 unsigned long *flags)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock_irqrestore(lock, *flags);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
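/*
 * Minimal usage sketch for the irqsave variant (hypothetical caller,
 * not part of this file).  Callers normally use the
 * atomic_dec_and_lock_irqsave() wrapper from <linux/spinlock.h>; this
 * is the same pattern as above, but safe when the lock can also be
 * taken from interrupt context:
 *
 *	void obj_put(struct obj *obj)
 *	{
 *		unsigned long flags;
 *
 *		if (atomic_dec_and_lock_irqsave(&obj->refcount,
 *						&obj_list_lock, flags)) {
 *			list_del(&obj->node);
 *			spin_unlock_irqrestore(&obj_list_lock, flags);
 *			kfree(obj);
 *		}
 *	}
 */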