Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

#include <trace/hooks/dtask.h>

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains a 'struct task_struct *' pointing to the current lock
 * owner; NULL means not owned. Since task_struct pointers are aligned to
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

/*
 * Internal helper function; C doesn't allow us to hide it :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

__must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	if (unlikely(__mutex_owner(lock) == current))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}
EXPORT_SYMBOL(mutex_trylock_recursive);
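
/*
 * Illustrative sketch (not part of this file): how a caller might consume
 * the three-way result of mutex_trylock_recursive(). The helper
 * do_locked_work() is hypothetical; the enum values come from
 * <linux/mutex.h>. Guarded with #if 0 since it exists only as an example.
 */
#if 0
static int example_trylock_recursive(struct mutex *m)
{
	int ret;

	switch (mutex_trylock_recursive(m)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		/* We already hold @m: do the work, but do not unlock here. */
		return do_locked_work();
	case MUTEX_TRYLOCK_SUCCESS:
		ret = do_locked_work();
		mutex_unlock(m);
		return ret;
	case MUTEX_TRYLOCK_FAILED:
	default:
		return -EBUSY;
	}
}
#endif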

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
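
/*
 * Worked example (illustrative only): the owner word packs a task_struct
 * pointer together with the low flag bits, which works because task_struct
 * alignment keeps those bits zero in any real pointer. Hypothetical values;
 * guarded with #if 0.
 */
#if 0
static void example_owner_word(struct task_struct *tsk)
{
	unsigned long owner = (unsigned long)tsk |
			      MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF;

	WARN_ON(__owner_task(owner) != tsk);	/* high bits: the owner */
	WARN_ON(__owner_flags(owner) !=
		(MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF));
}
#endif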

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * If we set the HANDOFF bit, we must make sure it doesn't
		 * live past the point where we acquire the lock. This would
		 * be possible if we (accidentally) set the bit on an
		 * unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif
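
/*
 * Note (annotation, not kernel source): the fast paths pair
 * atomic_long_try_cmpxchg_acquire() in lock with
 * atomic_long_cmpxchg_release() in unlock, so everything the previous
 * owner wrote before releasing is visible to the next owner.
 */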

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	bool already_on_list = false;

	debug_mutex_add_waiter(lock, waiter, current);

	trace_android_vh_alter_mutex_list_add(lock, waiter, list, &already_on_list);
	if (!already_on_list)
		list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is
 * equivalent to a regular unlock. Sets PICKUP on a handoff, clears
 * HANDOFF, preserves WAITERS. Provides RELEASE semantics like a regular
 * unlock; the __mutex_trylock() provides matching ACQUIRE semantics for
 * the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}
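
/*
 * Worked example (annotation, not kernel source): suppose T0 owns the lock
 * and the owner word is (T0 | WAITERS | HANDOFF). __mutex_handoff(lock, T1)
 * installs (T1 | WAITERS | PICKUP) with RELEASE semantics; T1 then claims
 * the lock in __mutex_trylock_or_owner(), which sees PICKUP set for
 * current, clears it together with HANDOFF, and installs (T1 | WAITERS)
 * with ACQUIRE semantics.
 */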

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
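
/*
 * Minimal usage sketch (illustrative only; all names are examples): a
 * static mutex serializing writers of a shared counter. mutex_lock() may
 * sleep, so this is only valid in process context. Guarded with #if 0.
 */
#if 0
static DEFINE_MUTEX(example_lock);
static int example_count;

static void example_increment(void)
{
	mutex_lock(&example_lock);
	example_count++;
	mutex_unlock(&example_lock);	/* must be the same task */
}
#endif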

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
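
/*
 * Illustrative sketch of the caller-side backoff protocol (hypothetical
 * names; see Documentation/locking/ww-mutex-design.rst for the canonical
 * version): when ww_mutex_lock() returns -EDEADLK, the younger context
 * releases everything it holds, sleeps on the contended lock with
 * ww_mutex_lock_slow(), and retries. Guarded with #if 0.
 */
#if 0
static DEFINE_WW_CLASS(example_ww_class);

static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &example_ww_class);

	ww_mutex_lock(a, &ctx);			/* first lock cannot deadlock */
	while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
		/* We lost: back off, then wait for the contended lock. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);	/* sleeps, cannot fail */
		swap(a, b);			/* held lock first on retry */
	}
	ww_acquire_done(&ctx);

	/* ... both objects are now locked ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}
#endif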

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done()?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return (signed long)(a->stamp - b->stamp) > 0;
}
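
/*
 * Worked example (annotation): stamps come from a monotonic counter that
 * may wrap. With b->stamp == ULONG_MAX and a->stamp == 1, the unsigned
 * difference a->stamp - b->stamp is 2, which is positive as a signed
 * value, so @a is still correctly seen as the younger context despite
 * the wraparound.
 */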

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
			__ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder. Even though multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx; if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. A contended waiter will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, the contents are undefined; only by
	 * acquiring wait_lock is there a guarantee that they are
	 * not invalid when read.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * To guard against lock holder preemption, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * an unlocked mutex is not allowed.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) void __sched ww_mutex_unlock(struct ww_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	 * The unlocking fastpath is the 0->1 transition from 'locked'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	 * into 'unlocked' state:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	if (lock->ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) #ifdef CONFIG_DEBUG_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		if (lock->ctx->acquired > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			lock->ctx->acquired--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		lock->ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	mutex_unlock(&lock->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) EXPORT_SYMBOL(ww_mutex_unlock);
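/*
 * A hedged sketch of the acquire/release cycle these helpers are meant
 * for (my_class, a and b are hypothetical; a real caller would loop on
 * repeated -EDEADLK; see Documentation/locking/ww-mutex-design.rst for
 * the authoritative version):
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	ret = ww_mutex_lock(&a->lock, &ctx);
 *	if (!ret)
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *	if (ret == -EDEADLK) {
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		ret = ww_mutex_lock(&a->lock, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 *	... use a and b ...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 *
 * It is the -EDEADLK path that exercises __ww_mutex_kill() below.
 */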
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) static __always_inline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) __ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (ww_ctx->acquired > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) #ifdef CONFIG_DEBUG_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		struct ww_mutex *ww;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		ww = container_of(lock, struct ww_mutex, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		ww_ctx->contending_lock = ww;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		return -EDEADLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809)  * Check the wound condition for the current lock acquire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * Wound-Wait: If we're wounded, kill ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * Wait-Die: If we're trying to acquire a lock already held by an older
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  *           context, kill ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816)  * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817)  * look at waiters before us in the wait-list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) static inline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		      struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	struct mutex_waiter *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (ctx->acquired == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (!ctx->is_wait_die) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		if (ctx->wounded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			return __ww_mutex_kill(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		return __ww_mutex_kill(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	 * If there is a waiter in front of us that has a context, then its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	 * stamp is earlier than ours and we must kill ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	cur = waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		if (!cur->ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		return __ww_mutex_kill(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
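/*
 * A worked example of the Wait-Die check above (hypothetical stamps;
 * a smaller stamp means an older context): if context A (stamp 1)
 * holds the lock and context B (stamp 2) attempts it while already
 * holding other ww locks (acquired > 0), __ww_ctx_stamp_after(B, A)
 * is true and B backs off with -EDEADLK. Were B the older of the two,
 * it would simply keep waiting.
 */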
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856)  * Add @waiter to the wait-list, keeping the wait-list ordered by stamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * smallest first, so that older contexts are preferred over younger ones
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * when acquiring the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * Waiters without context are interspersed in FIFO order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * Furthermore, for Wait-Die, kill ourselves immediately when possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  * (an older context is already waiting) to avoid pointless waiting; for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * Wound-Wait, ensure we wound the owning context when it is younger.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) static inline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) __ww_mutex_add_waiter(struct mutex_waiter *waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		      struct mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		      struct ww_acquire_ctx *ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	struct mutex_waiter *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	struct list_head *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	bool is_wait_die;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (!ww_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		__mutex_add_waiter(lock, waiter, &lock->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	is_wait_die = ww_ctx->is_wait_die;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 * Add the waiter before the first waiter with a higher stamp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	 * Waiters without a context are skipped to avoid starving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	 * them. Wait-Die waiters may die here. Wound-Wait waiters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	 * never die here, but they are sorted in stamp order and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	 * may wound the lock holder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	pos = &lock->wait_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		if (!cur->ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			 * Wait-Die: if we find an older context waiting, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			 * is no point in queueing behind it, as we'd have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			 * die the moment it acquired the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			if (is_wait_die) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 				int ret = __ww_mutex_kill(lock, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 					return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		pos = &cur->list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 		/* Wait-Die: ensure younger waiters die. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		__ww_mutex_die(lock, cur, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	__mutex_add_waiter(lock, waiter, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	 * wound it so that we can proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (!is_wait_die) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 * See ww_mutex_set_context_fastpath(). Orders setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		 * such that either we or the fastpath will wound @ww->ctx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		smp_mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) }
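/*
 * An illustration of the ordering maintained above (hypothetical
 * stamps in parentheses). Inserting a waiter with stamp 25 into
 *
 *	head -> (10) -> (20) -> (no ctx) -> (30)
 *
 * scans from the tail: (30) is younger than 25, so the new waiter goes
 * in front of it and (30) is given a Wait-Die wakeup via
 * __ww_mutex_die(); the context-less waiter is skipped; (20) is older,
 * so the scan stops. The result is
 *
 *	head -> (10) -> (20) -> (no ctx) -> (25) -> (30)
 */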
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * Lock a mutex (possibly interruptible), slowpath:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static __always_inline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		    struct lockdep_map *nest_lock, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	struct mutex_waiter waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	struct ww_mutex *ww;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (!use_ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		ww_ctx = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) #ifdef CONFIG_DEBUG_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	ww = container_of(lock, struct ww_mutex, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (ww_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		 * Reset the wounded flag after a kill. No other process can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		 * race and wound us here since they can't have a valid owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		 * pointer if we don't have any locks held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		if (ww_ctx->acquired == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			ww_ctx->wounded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (__mutex_trylock(lock) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		/* got the lock, yay! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		lock_acquired(&lock->dep_map, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		if (ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			ww_mutex_set_context_fastpath(ww, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	spin_lock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	 * After waiting to acquire the wait_lock, try again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	if (__mutex_trylock(lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		if (ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			__ww_mutex_check_waiters(lock, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		goto skip_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	debug_mutex_lock_common(lock, &waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	lock_contended(&lock->dep_map, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	if (!use_ww_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		/* add waiting tasks to the end of the waitqueue (FIFO): */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) #ifdef CONFIG_DEBUG_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		 * Add in stamp order, waking up waiters that must kill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		 * themselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			goto err_early_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		waiter.ww_ctx = ww_ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	waiter.task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	trace_android_vh_mutex_wait_start(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	set_current_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		bool first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		 * Once we hold wait_lock, we're serialized against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		 * mutex_unlock() handing the lock off to us; do a trylock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		 * before testing the error conditions to make sure we pick up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		 * the handoff.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		if (__mutex_trylock(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			goto acquired;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		 * Check for signals and kill conditions while holding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		 * wait_lock. This ensures the lock cancellation is ordered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		 * against mutex_unlock() and wake-ups do not go missing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		if (signal_pending_state(state, current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		if (ww_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 				goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		trace_android_vh_mutex_start_check_new_owner(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		spin_unlock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		schedule_preempt_disabled();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		first = __mutex_waiter_is_first(lock, &waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		set_current_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		 * Here we order against unlock; we must either see it change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		 * state back to RUNNING and fall through the next schedule(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		 * or we must see its unlock and acquire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		if (__mutex_trylock(lock) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		spin_lock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	spin_lock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) acquired:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	trace_android_vh_mutex_wait_finish(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (ww_ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		 * Wound-Wait: we stole the lock (!first_waiter); check the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		 * waiters, as anyone might want to wound us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		if (!ww_ctx->is_wait_die &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		    !__mutex_waiter_is_first(lock, &waiter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			__ww_mutex_check_waiters(lock, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	__mutex_remove_waiter(lock, &waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	debug_mutex_free_waiter(&waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) skip_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	/* got the lock - cleanup and rejoice! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	lock_acquired(&lock->dep_map, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	if (ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		ww_mutex_lock_acquired(ww, ww_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	spin_unlock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	trace_android_vh_mutex_wait_finish(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	__mutex_remove_waiter(lock, &waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) err_early_kill:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	spin_unlock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	debug_mutex_free_waiter(&waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	mutex_release(&lock->dep_map, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) static int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) __mutex_lock(struct mutex *lock, long state, unsigned int subclass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	     struct lockdep_map *nest_lock, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		struct lockdep_map *nest_lock, unsigned long ip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		struct ww_acquire_ctx *ww_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) void __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) mutex_lock_nested(struct mutex *lock, unsigned int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) EXPORT_SYMBOL_GPL(mutex_lock_nested);
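/*
 * A sketch of when the subclass annotation matters (parent and child
 * are hypothetical objects whose locks share one lock class). Without
 * the nested annotation, lockdep would report the second acquisition
 * as a possible recursive deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */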
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) void __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) void __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	int token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	token = io_schedule_prepare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			    subclass, NULL, _RET_IP_, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	io_schedule_finish(token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	unsigned tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (ctx->deadlock_inject_countdown-- == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		tmp = ctx->deadlock_inject_interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		if (tmp > UINT_MAX/4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			tmp = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			tmp = tmp*2 + tmp + tmp/2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		ctx->deadlock_inject_interval = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		ctx->deadlock_inject_countdown = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		ctx->contending_lock = lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		ww_mutex_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		return -EDEADLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
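/*
 * Note on the arithmetic above: tmp*2 + tmp + tmp/2 grows the interval
 * by roughly 3.5x per injected deadlock (integer division, so the
 * sequence runs 1 -> 3 -> 10 -> 35 -> 122 -> ...), and the UINT_MAX/4
 * cap keeps the multiplication from overflowing.
 */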
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	ret =  __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			       0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			       ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (!ret && ctx && ctx->acquired > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return ww_mutex_deadlock_injection(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) EXPORT_SYMBOL_GPL(ww_mutex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			      ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	if (!ret && ctx && ctx->acquired > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		return ww_mutex_deadlock_injection(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  * Release the lock, slowpath:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	struct task_struct *next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	unsigned long owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	mutex_release(&lock->dep_map, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	 * Release the lock before (potentially) taking the spinlock such that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	 * other contenders can get on with things ASAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	 * Except when HANDOFF is set; in that case we must not clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	 * owner field, but instead set it to the top waiter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	owner = atomic_long_read(&lock->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		unsigned long old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) #ifdef CONFIG_DEBUG_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		if (owner & MUTEX_FLAG_HANDOFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		old = atomic_long_cmpxchg_release(&lock->owner, owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 						  __owner_flags(owner));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		if (old == owner) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			if (owner & MUTEX_FLAG_WAITERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		owner = old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	spin_lock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	debug_mutex_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (!list_empty(&lock->wait_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		/* get the first entry from the wait-list: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		struct mutex_waiter *waiter =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 			list_first_entry(&lock->wait_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 					 struct mutex_waiter, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		next = waiter->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		debug_mutex_wake_waiter(lock, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		wake_q_add(&wake_q, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (owner & MUTEX_FLAG_HANDOFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		__mutex_handoff(lock, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	trace_android_vh_mutex_unlock_slowpath(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	spin_unlock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	wake_up_q(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	trace_android_vh_mutex_unlock_slowpath_end(lock, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
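/*
 * The DEFINE_WAKE_Q()/wake_q_add()/wake_up_q() pattern above defers the
 * wakeup: the waiter is queued while wait_lock is held, but the
 * (comparatively expensive) wakeup itself runs only after the spinlock
 * has been dropped, keeping the lock hold time short.
 */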
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) #ifndef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * Here come the less common (and hence less performance-critical) APIs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  * mutex_lock_interruptible() and mutex_trylock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static noinline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) __mutex_lock_killable_slowpath(struct mutex *lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static noinline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) __mutex_lock_interruptible_slowpath(struct mutex *lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  * @lock: The mutex to be acquired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)  * Lock the mutex like mutex_lock().  If a signal is delivered while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  * process is sleeping, this function will return without acquiring the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  * mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  * Context: Process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * Return: 0 if the lock was successfully acquired or %-EINTR if a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * signal arrived.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) int __sched mutex_lock_interruptible(struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (__mutex_trylock_fast(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	return __mutex_lock_interruptible_slowpath(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) EXPORT_SYMBOL(mutex_lock_interruptible);
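/*
 * A minimal caller sketch (dev is hypothetical); returning -ERESTARTSYS
 * lets the signal code transparently restart the syscall:
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	...
 *	mutex_unlock(&dev->lock);
 *	return 0;
 */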
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * @lock: The mutex to be acquired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  * Lock the mutex like mutex_lock().  If a signal which will be fatal to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  * the current process is delivered while the process is sleeping, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  * function will return without acquiring the mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  * Context: Process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)  * Return: 0 if the lock was successfully acquired or %-EINTR if a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * fatal signal arrived.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int __sched mutex_lock_killable(struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	if (__mutex_trylock_fast(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	return __mutex_lock_killable_slowpath(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) EXPORT_SYMBOL(mutex_lock_killable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)  * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)  * @lock: The mutex to be acquired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)  * Lock the mutex like mutex_lock().  While the task is waiting for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)  * mutex, it will be accounted as being in the IO wait state by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)  * scheduler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)  * Context: Process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) void __sched mutex_lock_io(struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	int token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	token = io_schedule_prepare();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	mutex_lock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	io_schedule_finish(token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) EXPORT_SYMBOL_GPL(mutex_lock_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static noinline void __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) __mutex_lock_slowpath(struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static noinline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) __mutex_lock_killable_slowpath(struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) static noinline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) __mutex_lock_interruptible_slowpath(struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static noinline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			       _RET_IP_, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static noinline int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 					    struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			       _RET_IP_, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)  * mutex_trylock - try to acquire the mutex, without waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  * @lock: the mutex to be acquired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  * Try to acquire the mutex atomically. Returns 1 if the mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  * has been acquired successfully, and 0 on contention.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420)  * NOTE: this function follows the spin_trylock() convention, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)  * it is negated from the down_trylock() return values! Be careful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)  * about this when converting semaphore users to mutexes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * This function must not be used in interrupt context. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * mutex must be released by the same task that acquired it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) int __sched mutex_trylock(struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	bool locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) #ifdef CONFIG_DEBUG_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	locked = __mutex_trylock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	if (locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	return locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) EXPORT_SYMBOL(mutex_trylock);
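/*
 * Illustrating the spin_trylock() convention noted above, where 1 means
 * acquired (dev is hypothetical); on contention the caller bails out
 * rather than sleep:
 *
 *	if (!mutex_trylock(&dev->lock))
 *		return -EBUSY;
 *	...
 *	mutex_unlock(&dev->lock);
 */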
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) #ifndef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (__mutex_trylock_fast(&lock->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			ww_mutex_set_context_fastpath(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	return __ww_mutex_lock_slowpath(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) EXPORT_SYMBOL(ww_mutex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	if (__mutex_trylock_fast(&lock->base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 			ww_mutex_set_context_fastpath(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) EXPORT_SYMBOL(ww_mutex_lock_interruptible);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)  * atomic_dec_and_mutex_lock - return holding the mutex if we decrement to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)  * @cnt: the atomic counter to decrement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  * @lock: the mutex to hold on return if the count reached 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  * Return: 1, with the mutex held, if the decrement reached 0; 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	/* dec if we can't possibly hit 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (atomic_add_unless(cnt, -1, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	/* we might hit 0, so take the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	mutex_lock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	if (!atomic_dec_and_test(cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		/* when we actually did the dec, we didn't hit 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		mutex_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	/* we hit 0, and we hold the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
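/*
 * A hedged sketch of the intended use, dropping the last reference
 * under the lock (obj and registry_lock are hypothetical):
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &registry_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&registry_lock);
 *		kfree(obj);
 *	}
 */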