/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
			  struct lock_class_key *key, short inner)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner);
#endif
	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	lock->magic = SPINLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);
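/*
 * Callers normally do not invoke __raw_spin_lock_init() directly; they go
 * through the spin_lock_init()/raw_spin_lock_init() macros (or the static
 * DEFINE_SPINLOCK() initializer), which supply the lock's name and lockdep
 * class key automatically. An illustrative driver-side sketch (the struct
 * and function names below are made up for illustration only):
 *
 *	struct my_dev {
 *		spinlock_t	lock;	// protects the fields below
 *		u32		state;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		spin_lock_init(&dev->lock);	// ends up here under DEBUG_SPINLOCK
 *		dev->state = 0;
 *		return 0;
 *	}
 */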

void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);
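/*
 * As with spinlocks, rwlock users reach this through the rwlock_init()
 * macro or the static DEFINE_RWLOCK() initializer rather than calling
 * __rwlock_init() by hand, e.g. (names below are hypothetical):
 *
 *	static DEFINE_RWLOCK(my_table_lock);	// static initialization
 *
 *	rwlock_init(&dev->map_lock);		// dynamic counterpart
 */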

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = READ_ONCE(lock->owner);

	if (owner == SPINLOCK_OWNER_INIT)
		owner = NULL;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, READ_ONCE(lock->magic),
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		READ_ONCE(lock->owner_cpu));
	dump_stack();
}
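/*
 * Given the format strings above, a report looks roughly like this
 * (all values invented for illustration):
 *
 *	BUG: spinlock recursion on CPU#1, modprobe/1234
 *	 lock: my_lock+0x0/0x40, .magic: dead4ead, .owner: modprobe/1234, .owner_cpu: 1
 *
 * followed by the stack trace emitted by dump_stack().
 */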

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
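/*
 * Note that SPIN_BUG_ON() is deliberately softer than BUG_ON(): via
 * debug_locks_off(), the first failure prints one report and disables
 * further lock debugging instead of panicking, so the system can keep
 * running and the log stays readable.
 */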

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
	SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}
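/*
 * The ordering in debug_spin_unlock() matters: the owner fields are
 * checked and then cleared while the lock is still held, so a new owner
 * acquiring the lock immediately afterwards cannot race with the
 * bookkeeping of the previous one.
 */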

/*
 * We now rely on the NMI watchdog to detect lockups instead of doing the
 * detection here with an unfair lock, which can cause problems of its own.
 */
void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
	debug_spin_lock_after(lock);
}
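/*
 * The before/after hooks bracket the actual acquisition: the recursion
 * and ownership checks run *before* arch_spin_lock(), so a self-deadlock
 * is reported before this CPU starts spinning forever, while the owner
 * fields are only filled in *after* the lock is really held.
 */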

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&lock->raw_lock);

	if (ret) {
		mmiowb_spin_lock();
		debug_spin_lock_after(lock);
	}
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
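/*
 * A typical caller pattern, sketched for illustration (reached via the
 * spin_trylock() wrapper rather than this function directly; dev and
 * do_quick_work() are hypothetical):
 *
 *	if (spin_trylock(&dev->lock)) {
 *		do_quick_work(dev);
 *		spin_unlock(&dev->lock);
 *	} else {
 *		// fall back without blocking
 *	}
 */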

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
	mmiowb_spin_unlock();
	debug_spin_unlock(lock);
	arch_spin_unlock(&lock->raw_lock);
}
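/*
 * mmiowb_spin_unlock() runs first so that, on architectures that need it,
 * MMIO writes issued inside the critical section are ordered before the
 * lock is actually released by arch_spin_unlock().
 */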

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
	if (!debug_locks_off())
		return;

	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
		msg, raw_smp_processor_id(), current->comm,
		task_pid_nr(current), lock);
	dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

void do_raw_read_lock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_lock(&lock->raw_lock);
}
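/*
 * Note that the read-side paths only check the magic value: owner and
 * owner_cpu describe a single writer, and a read lock may be held by any
 * number of CPUs at once, so per-owner bookkeeping cannot apply here.
 */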

int do_raw_read_trylock(rwlock_t *lock)
{
	int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
{
	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}
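/*
 * The write side mirrors the spinlock owner tracking above: exactly one
 * writer can hold the lock, so the recursion, wrong-owner and wrong-CPU
 * checks are all meaningful again.
 */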

void do_raw_write_lock(rwlock_t *lock)
{
	debug_write_lock_before(lock);
	arch_write_lock(&lock->raw_lock);
	debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
	int ret = arch_write_trylock(&lock->raw_lock);

	if (ret)
		debug_write_lock_after(lock);
#ifndef CONFIG_SMP
	/*
	 * Must not happen on UP:
	 */
	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
	return ret;
}
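/*
 * Unlike the spinlock paths, the rwlock functions in this file carry no
 * mmiowb_*() calls: the mmiowb ordering contract is tied to spinlocks
 * only.
 */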

void do_raw_write_unlock(rwlock_t *lock)
{
	debug_write_unlock(lock);
	arch_write_unlock(&lock->raw_lock);
}