// SPDX-License-Identifier: GPL-2.0-only
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 * Copyright (C) 2006 Esben Nielsen
 *
 * See Documentation/locking/rt-mutex-design.rst for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <trace/hooks/dtask.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner        bit0
 * NULL         0       lock is free (fast acquire possible)
 * NULL         1       lock is free and has waiters and the top waiter
 *                      is going to take the lock*
 * taskpointer  0       lock is held (fast release possible)
 * taskpointer  1       lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL in this small window, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
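
/*
 * Illustrative sketch (editor's addition, not part of the locking code):
 * how a single word encodes both the owner pointer and the waiters bit
 * described above. It assumes RT_MUTEX_HAS_WAITERS == 1UL, i.e. bit 0,
 * which works because task_struct pointers are at least word aligned:
 *
 *      unsigned long val = (unsigned long)lock->owner;
 *      struct task_struct *owner = (struct task_struct *)
 *                                      (val & ~RT_MUTEX_HAS_WAITERS);
 *      bool bit0_set = val & RT_MUTEX_HAS_WAITERS;
 *
 * Masking the bit off this way is how the rt_mutex_owner() helper in
 * rtmutex_common.h recovers the plain owner pointer.
 */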

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
        unsigned long val = (unsigned long)owner;

        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;

        WRITE_ONCE(lock->owner, (struct task_struct *)val);
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        if (rt_mutex_has_waiters(lock))
                return;

        /*
         * The rbtree has no waiters enqueued, now make sure that the
         * lock->owner still has the waiters bit set, otherwise the
         * following can happen:
         *
         * CPU 0                CPU 1                   CPU2
         * l->owner=T1
         *                      rt_mutex_lock(l)
         *                      lock(l->lock)
         *                      l->owner = T1 | HAS_WAITERS;
         *                      enqueue(T2)
         *                      boost()
         *                        unlock(l->lock)
         *                      block()
         *
         *                                              rt_mutex_lock(l)
         *                                              lock(l->lock)
         *                                              l->owner = T1 | HAS_WAITERS;
         *                                              enqueue(T3)
         *                                              boost()
         *                                                unlock(l->lock)
         *                                              block()
         *                      signal(->T2)            signal(->T3)
         *                      lock(l->lock)
         *                      dequeue(T2)
         *                      deboost()
         *                        unlock(l->lock)
         *                                              lock(l->lock)
         *                                              dequeue(T3)
         *                                               ==> wait list is empty
         *                                              deboost()
         *                                               unlock(l->lock)
         *                      lock(l->lock)
         *                      fixup_rt_mutex_waiters()
         *                        if (wait_list_empty(l)) {
         *                          owner = l->owner & ~HAS_WAITERS;
         *                          l->owner = owner
         *                            ==> l->owner = T1
         *                        }
         *                                              lock(l->lock)
         * rt_mutex_unlock(l)                           fixup_rt_mutex_waiters()
         *                                                if (wait_list_empty(l)) {
         *                                                  owner = l->owner & ~HAS_WAITERS;
         *                                                  cmpxchg(l->owner, T1, NULL)
         *                                                    ===> Success (l->owner = NULL)
         *
         *                                                  l->owner = owner
         *                                                    ==> l->owner = T1
         *                                                }
         *
         * With the check for the waiter bit in place T3 on CPU2 will not
         * overwrite. All tasks fiddling with the waiters bit are
         * serialized by l->lock, so nothing else can modify the waiters
         * bit. If the bit is set then nothing can change l->owner either
         * so the simple RMW is safe. The cmpxchg() will simply fail if it
         * happens in the middle of the RMW because the waiters bit is
         * still set.
         */
        owner = READ_ONCE(*p);
        if (owner & RT_MUTEX_HAS_WAITERS)
                WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
 * relaxed semantics suffice.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        unsigned long owner, *p = (unsigned long *) &lock->owner;

        do {
                owner = *p;
        } while (cmpxchg_relaxed(p, owner,
                                 owner | RT_MUTEX_HAS_WAITERS) != owner);
}
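
/*
 * Editor's illustration (a sketch, not an additional API): the caller
 * pattern the comment above describes. The waiters bit is only set while
 * ->wait_lock is held, so the acquire/release of ->wait_lock provides
 * all the ordering the bit itself needs, and the relaxed cmpxchg is
 * sufficient:
 *
 *      raw_spin_lock_irqsave(&lock->wait_lock, flags);
 *      mark_rt_mutex_waiters(lock);    // all later fastpath cmpxchg fail
 *      ...                             // inspect owner, enqueue waiter
 *      raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 */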

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
                                        unsigned long flags)
        __releases(lock->wait_lock)
{
        struct task_struct *owner = rt_mutex_owner(lock);

        clear_rt_mutex_waiters(lock);
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         * cmpxchg(p, owner, 0) == owner
         *                                      mark_rt_mutex_waiters(lock);
         *                                      acquire(lock);
         * or:
         *
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      mark_rt_mutex_waiters(lock);
         *
         * cmpxchg(p, owner, 0) != owner
         *                                      enqueue_waiter();
         *                                      unlock(wait_lock);
         * lock(wait_lock);
         * wake waiter();
         * unlock(wait_lock);
         *                                      lock(wait_lock);
         *                                      acquire(lock);
         */
        return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_acquire(l,c,n)        (0)
# define rt_mutex_cmpxchg_release(l,c,n)        (0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
                                        unsigned long flags)
        __releases(lock->wait_lock)
{
        lock->owner = NULL;
        raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        return true;
}
#endif

/*
 * Only use with rt_mutex_waiter_{less,equal}()
 */
#define task_to_waiter(p)       \
        &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
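
/*
 * For illustration: task_to_waiter() builds a throwaway on-stack waiter
 * from a task's current scheduling parameters, so that a task can be fed
 * to the waiter comparison helpers. It is used exactly that way in the
 * chain walk below:
 *
 *      if (rt_mutex_waiter_equal(waiter, task_to_waiter(task)))
 *              ...     // waiter already reflects the task's priority
 */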

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                     struct rt_mutex_waiter *right)
{
        if (left->prio < right->prio)
                return 1;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 1 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return dl_time_before(left->deadline, right->deadline);

        return 0;
}
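
/*
 * Worked example (editor's addition): kernel-internal prio values order
 * as "lower number == more important", so an RT waiter with ->prio 10
 * sorts before one with ->prio 50. Two SCHED_DEADLINE waiters both carry
 * a prio below MAX_DL_PRIO, so the first test cannot decide between them;
 * dl_prio() is then true and the earlier absolute deadline wins via
 * dl_time_before(). Two non-deadline waiters of equal prio compare as
 * not-less in both directions, which rt_mutex_waiter_equal() below
 * reports as equality.
 */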

static inline int
rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
                      struct rt_mutex_waiter *right)
{
        if (left->prio != right->prio)
                return 0;

        /*
         * If both waiters have dl_prio(), we check the deadlines of the
         * associated tasks.
         * If left waiter has a dl_prio(), and we didn't return 0 above,
         * then right waiter has a dl_prio() too.
         */
        if (dl_prio(left->prio))
                return left->deadline == right->deadline;

        return 1;
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &lock->waiters.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        bool leftmost = true;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&waiter->tree_entry, parent, link);
        rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
}
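
/*
 * Editor's note with a sketch: because the tree is kept ordered by
 * rt_mutex_waiter_less(), the leftmost node is always the highest
 * priority waiter, and rb_root_cached tracks that node. Looking up the
 * top waiter is therefore O(1) pointer chasing along the lines of:
 *
 *      struct rb_node *leftmost = rb_first_cached(&lock->waiters);
 *      struct rt_mutex_waiter *top =
 *              rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
 *
 * which is what the rt_mutex_top_waiter() helper used below relies on.
 */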

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->tree_entry))
                return;

        rb_erase_cached(&waiter->tree_entry, &lock->waiters);
        RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct rt_mutex_waiter *entry;
        bool leftmost = true;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
                if (rt_mutex_waiter_less(waiter, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        rb_link_node(&waiter->pi_tree_entry, parent, link);
        rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
                return;

        rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
        RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

static void rt_mutex_adjust_prio(struct task_struct *p)
{
        struct task_struct *pi_task = NULL;

        lockdep_assert_held(&p->pi_lock);

        if (task_has_pi_waiters(p))
                pi_task = task_top_pi_waiter(p)->task;

        rt_mutex_setprio(p, pi_task);
}
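
/*
 * Concrete example (editor's addition): if a SCHED_OTHER owner holds a
 * lock on which an RT task of prio 10 blocks, the RT task becomes the
 * owner's top pi waiter and rt_mutex_setprio() boosts the owner to an
 * effective prio of 10. Once that waiter is gone, pi_task is NULL (or
 * points at the next-best waiter) and the same call deboosts the owner
 * back toward its normal priority.
 */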

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                                          enum rtmutex_chainwalk chwalk)
{
        /*
         * This is just a wrapper function for the following call,
         * because debug_rt_mutex_detect_deadlock() smells like a magic
         * debug feature and I wanted to keep the cond function in the
         * main source file along with the comments instead of having
         * two of the same in the headers.
         */
        return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
        return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:       the task owning the mutex (owner) for which a chain walk is
 *              probably needed
 * @chwalk:     do we have to carry out deadlock detection?
 * @orig_lock:  the mutex (can be NULL if we are walking the chain to recheck
 *              things for a task that has just got its priority adjusted, and
 *              is waiting on a mutex)
 * @next_lock:  the mutex on which the owner of @orig_lock was blocked before
 *              we dropped its pi_lock. Is never dereferenced, only used for
 *              comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *              its priority to the mutex owner (can be NULL in the case
 *              depicted above or if the top waiter has gone away and we are
 *              actually deboosting the owner)
 * @top_task:   the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 *  [R] refcount on task
 *  [P] task->pi_lock held
 *  [L] rtmutex->wait_lock held
 *
 * Step Description                             Protected by
 *      function arguments:
 *      @task                                   [R]
 *      @orig_lock if != NULL                   @top_task is blocked on it
 *      @next_lock                              Unprotected. Cannot be
 *                                              dereferenced. Only used for
 *                                              comparison.
 *      @orig_waiter if != NULL                 @top_task is blocked on it
 *      @top_task                               current, or in case of proxy
 *                                              locking protected by calling
 *                                              code
 *      again:
 *        loop_sanity_check();
 *      retry:
 * [1]    lock(task->pi_lock);                  [R] acquire [P]
 * [2]    waiter = task->pi_blocked_on;         [P]
 * [3]    check_exit_conditions_1();            [P]
 * [4]    lock = waiter->lock;                  [P]
 * [5]    if (!try_lock(lock->wait_lock)) {     [P] try to acquire [L]
 *          unlock(task->pi_lock);              release [P]
 *          goto retry;
 *        }
 * [6]    check_exit_conditions_2();            [P] + [L]
 * [7]    requeue_lock_waiter(lock, waiter);    [P] + [L]
 * [8]    unlock(task->pi_lock);                release [P]
 *        put_task_struct(task);                release [R]
 * [9]    check_exit_conditions_3();            [L]
 * [10]   task = owner(lock);                   [L]
 *        get_task_struct(task);                [L] acquire [R]
 *        lock(task->pi_lock);                  [L] acquire [P]
 * [11]   requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
 * [12]   check_exit_conditions_4();            [P] + [L]
 * [13]   unlock(task->pi_lock);                release [P]
 *        unlock(lock->wait_lock);              release [L]
 *        goto again;
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      enum rtmutex_chainwalk chwalk,
                                      struct rt_mutex *orig_lock,
                                      struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
{
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
        struct rt_mutex_waiter *prerequeue_top_waiter;
        int ret = 0, depth = 0;
        struct rt_mutex *lock;
        bool detect_deadlock;
        bool requeue = true;

        detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

        /*
         * The (de)boosting is a step by step approach with a lot of
         * pitfalls. We want this to be preemptible and we want to hold a
         * maximum of two locks per step. So we have to check
         * carefully whether things change under us.
         */
 again:
        /*
         * We limit the lock chain length for each invocation.
         */
        if (++depth > max_lock_depth) {
                static int prev_max;

                /*
                 * Print this only once. If the admin changes the limit,
                 * print a new message when reaching the limit again.
                 */
                if (prev_max != max_lock_depth) {
                        prev_max = max_lock_depth;
                        printk(KERN_WARNING "Maximum lock depth %d reached "
                               "task: %s (%d)\n", max_lock_depth,
                               top_task->comm, task_pid_nr(top_task));
                }
                put_task_struct(task);

                return -EDEADLK;
        }

        /*
         * We are fully preemptible here and only hold the refcount on
         * @task. So everything can have changed under us since the
         * caller or our own code below (goto retry/again) dropped all
         * locks.
         */
 retry:
        /*
         * [1] Task cannot go away as we did a get_task() before!
         */
        raw_spin_lock_irq(&task->pi_lock);

        /*
         * [2] Get the waiter on which @task is blocked.
         */
        waiter = task->pi_blocked_on;

        /*
         * [3] check_exit_conditions_1() protected by task->pi_lock.
         */

        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
         * dropped the locks.
         */
        if (!waiter)
                goto out_unlock_pi;

        /*
         * Check the orig_waiter state. After we dropped the locks,
         * the previous owner of the lock might have released the lock.
         */
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;

        /*
         * We dropped all locks after taking a refcount on @task, so
         * the task might have moved on in the lock chain or even left
         * the chain completely and blocks now on an unrelated lock or
         * on @orig_lock.
         *
         * We stored the lock on which @task was blocked in @next_lock,
         * so we can detect the chain change.
         */
        if (next_lock != waiter->lock)
                goto out_unlock_pi;

        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
         * mode!
         */
        if (top_waiter) {
                if (!task_has_pi_waiters(task))
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
                 * are not the top pi waiter of the task. If deadlock
                 * detection is enabled we continue, but stop the
                 * requeueing in the chain walk.
                 */
                if (top_waiter != task_top_pi_waiter(task)) {
                        if (!detect_deadlock)
                                goto out_unlock_pi;
                        else
                                requeue = false;
                }
        }

        /*
         * If the waiter priority is the same as the task priority
         * then there is no further priority adjustment necessary. If
         * deadlock detection is off, we stop the chain walk. If it's
         * enabled we continue, but stop the requeueing in the chain
         * walk.
         */
        if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
                if (!detect_deadlock)
                        goto out_unlock_pi;
                else
                        requeue = false;
        }

        /*
         * [4] Get the next lock
         */
        lock = waiter->lock;
        /*
         * [5] We need to trylock here as we are holding task->pi_lock,
         * which is the reverse lock order versus the other rtmutex
         * operations.
         */
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irq(&task->pi_lock);
                cpu_relax();
                goto retry;
        }

        /*
         * [6] check_exit_conditions_2() protected by task->pi_lock and
         * lock->wait_lock.
         *
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
                ret = -EDEADLK;
                goto out_unlock_pi;
        }

        /*
         * If we just follow the lock chain for deadlock detection, no
         * need to do all the requeue operations. To avoid a truckload
         * of conditionals around the various places below, just do the
         * minimum chain walk checks.
         */
        if (!requeue) {
                /*
                 * No requeue [7] here. Just release @task [8].
                 */
                raw_spin_unlock(&task->pi_lock);
                put_task_struct(task);

                /*
                 * [9] check_exit_conditions_3 protected by lock->wait_lock.
                 * If there is no owner of the lock, end of chain.
                 */
                if (!rt_mutex_owner(lock)) {
                        raw_spin_unlock_irq(&lock->wait_lock);
                        return 0;
                }

                /* [10] Grab the next task, i.e. owner of @lock */
                task = get_task_struct(rt_mutex_owner(lock));
                raw_spin_lock(&task->pi_lock);

                /*
                 * No requeue [11] here. We just do deadlock detection.
                 *
                 * [12] Store whether owner is blocked
                 * itself. The decision is made after dropping the locks.
                 */
                next_lock = task_blocked_on_lock(task);
                /*
                 * Get the top waiter for the next iteration
                 */
                top_waiter = rt_mutex_top_waiter(lock);

                /* [13] Drop locks */
                raw_spin_unlock(&task->pi_lock);
                raw_spin_unlock_irq(&lock->wait_lock);

                /* If owner is not blocked, end of chain. */
                if (!next_lock)
                        goto out_put_task;
                goto again;
        }

        /*
         * Store the current top waiter before doing the requeue
         * operation on @lock. We need it for the boost/deboost
         * decision below.
         */
        prerequeue_top_waiter = rt_mutex_top_waiter(lock);

        /* [7] Requeue the waiter in the lock waiter tree. */
        rt_mutex_dequeue(lock, waiter);

        /*
         * Update the waiter prio fields now that we're dequeued.
         *
         * These values can have changed through either:
         *
         *   sys_sched_setscheduler() / sys_sched_setattr()
         *
         * or
         *
         *   DL CBS enforcement advancing the effective deadline.
         *
         * Even though pi_waiters also uses these fields, and that tree is only
         * updated in [11], we can do this here, since we hold [L], which
         * serializes all pi_waiters access and rb_erase() does not care about
         * the values of the node being removed.
         */
        waiter->prio = task->prio;
        waiter->deadline = task->dl.deadline;

        rt_mutex_enqueue(lock, waiter);

        /* [8] Release the task */
        raw_spin_unlock(&task->pi_lock);
        put_task_struct(task);

        /*
         * [9] check_exit_conditions_3 protected by lock->wait_lock.
         *
         * We must abort the chain walk if there is no lock owner even
         * in the deadlock detection case, as we have nothing to
         * follow here. This is the end of the chain we are walking.
         */
        if (!rt_mutex_owner(lock)) {
                /*
                 * If the requeue [7] above changed the top waiter,
                 * then we need to wake the new top waiter up to try
                 * to get the lock.
                 */
                if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }

        /* [10] Grab the next task, i.e. the owner of @lock */
        task = get_task_struct(rt_mutex_owner(lock));
        raw_spin_lock(&task->pi_lock);

        /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
                /*
                 * The waiter became the new top (highest priority)
                 * waiter on the lock. Replace the previous top waiter
                 * in the owner task's pi_waiters tree with this waiter
                 * and adjust the priority of the owner.
                 */
                rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                rt_mutex_adjust_prio(task);

        } else if (prerequeue_top_waiter == waiter) {
                /*
                 * The waiter was the top waiter on the lock, but is
                 * no longer the top priority waiter. Replace waiter in
                 * the owner task's pi_waiters tree with the new top
                 * (highest priority) waiter and adjust the priority
                 * of the owner.
                 * The new top waiter is stored in @waiter so that
                 * @waiter == @top_waiter evaluates to true below and
                 * we continue to deboost the rest of the chain.
                 */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                rt_mutex_adjust_prio(task);
        } else {
                /*
                 * Nothing changed. No need to do any priority
                 * adjustment.
                 */
        }

        /*
         * [12] check_exit_conditions_4() protected by task->pi_lock
         * and lock->wait_lock. The actual decisions are made after we
         * dropped the locks.
         *
         * Check whether the task which owns the current lock is pi
         * blocked itself. If yes we store a pointer to the lock for
         * the lock chain change detection above. After we dropped
         * task->pi_lock next_lock cannot be dereferenced anymore.
         */
        next_lock = task_blocked_on_lock(task);
        /*
         * Store the top waiter of @lock for the end of chain walk
         * decision below.
         */
        top_waiter = rt_mutex_top_waiter(lock);

        /* [13] Drop the locks */
        raw_spin_unlock(&task->pi_lock);
        raw_spin_unlock_irq(&lock->wait_lock);

        /*
         * Make the actual exit decisions [12], based on the stored
         * values.
         *
         * We reached the end of the lock chain. Stop right here. No
         * point to go back just to figure that out.
         */
        if (!next_lock)
                goto out_put_task;

        /*
         * If the current waiter is not the top waiter on the lock,
         * then we can stop the chain walk here if we are not in full
         * deadlock detection mode.
         */
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;

        goto again;

 out_unlock_pi:
        raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
        put_task_struct(task);

        return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * Try to take an rt-mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * Must be called with lock->wait_lock held and interrupts disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * @lock: The lock to be acquired.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * @task: The task which wants to acquire the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) * @waiter: The waiter that is queued to the lock's wait tree if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * callsite called task_blocked_on_lock(), otherwise NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct rt_mutex_waiter *waiter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) lockdep_assert_held(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * Before testing whether we can acquire @lock, we set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * other tasks which try to modify @lock into the slow path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * and they serialize on @lock->wait_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * as explained at the top of this file if and only if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * - There is a lock owner. The caller must fixup the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * transient state if it does a trylock or leaves the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * function due to a signal or timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * - @task acquires the lock and there are no other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * waiters. This is undone in rt_mutex_set_owner(@task) at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * the end of this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) mark_rt_mutex_waiters(lock);
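^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * A minimal sketch of what setting the waiters bit amounts to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * assuming the helper is a cmpxchg() retry loop on the owner word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * (illustrative only, not necessarily the exact helper body):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) *	unsigned long owner, *p = (unsigned long *)&lock->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) *	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) *		owner = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) *	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */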
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * If @lock has an owner, give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (rt_mutex_owner(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * If @waiter != NULL, @task has already enqueued the waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * into @lock waiter tree. If @waiter == NULL then this is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * trylock attempt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (waiter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * If waiter is not the highest priority waiter of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * @lock, give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (waiter != rt_mutex_top_waiter(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * We can acquire the lock. Remove the waiter from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * lock waiters tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) rt_mutex_dequeue(lock, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * If the lock has waiters already we check whether @task is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * eligible to take over the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * If there are no other waiters, @task can acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * the lock. @task->pi_blocked_on is NULL, so it does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * not need to be dequeued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (rt_mutex_has_waiters(lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * If @task->prio is greater than or equal to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * the top waiter priority (kernel view),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * @task lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (!rt_mutex_waiter_less(task_to_waiter(task),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) rt_mutex_top_waiter(lock)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * The current top waiter stays enqueued. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * don't have to change anything in the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * waiters order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * No waiters. Take the lock without the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * pi_lock dance. @task->pi_blocked_on is NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * and we have no waiters to enqueue in @task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * pi waiters tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) goto takeit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * Clear @task->pi_blocked_on. Requires protection by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * @task->pi_lock. Redundant operation for the @waiter == NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * case, but conditionals are more expensive than a redundant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * store.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) raw_spin_lock(&task->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) task->pi_blocked_on = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * Finish the lock acquisition. @task is the new owner. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * other waiters exist we have to insert the highest priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * waiter into @task->pi_waiters tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (rt_mutex_has_waiters(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) raw_spin_unlock(&task->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) takeit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* We got the lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) debug_rt_mutex_lock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * are still waiters or clears it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) rt_mutex_set_owner(lock, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
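^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * Illustrative call modes (a sketch; both patterns appear verbatim in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * the callers later in this file): a trylock probes without a waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * while the slowlock wait loop passes its already queued waiter:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) *	try_to_take_rt_mutex(lock, current, NULL);	<- trylock attempt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *	try_to_take_rt_mutex(lock, current, waiter);	<- queued waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */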
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * Task blocks on lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * Prepare waiter and propagate pi chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * This must be called with lock->wait_lock held and interrupts disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct rt_mutex_waiter *waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct task_struct *task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) enum rtmutex_chainwalk chwalk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) struct task_struct *owner = rt_mutex_owner(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct rt_mutex_waiter *top_waiter = waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct rt_mutex *next_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) int chain_walk = 0, res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) lockdep_assert_held(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * Early deadlock detection. We really don't want the task to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * enqueue on itself just to untangle the mess later. It's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * only an optimization. We drop the locks, so another waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * can come in before the chain walk detects the deadlock. That
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * waiter would then detect the deadlock and return -EDEADLK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * which is wrong, as it is not actually in a deadlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) * situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (owner == task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return -EDEADLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) raw_spin_lock(&task->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) waiter->task = task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) waiter->lock = lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) waiter->prio = task->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) waiter->deadline = task->dl.deadline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* Get the top priority waiter on the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (rt_mutex_has_waiters(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) top_waiter = rt_mutex_top_waiter(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) rt_mutex_enqueue(lock, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) task->pi_blocked_on = waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) raw_spin_unlock(&task->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (!owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) raw_spin_lock(&owner->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (waiter == rt_mutex_top_waiter(lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) rt_mutex_dequeue_pi(owner, top_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) rt_mutex_enqueue_pi(owner, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) rt_mutex_adjust_prio(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) if (owner->pi_blocked_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) chain_walk = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) chain_walk = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* Store the lock on which owner is blocked or NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) next_lock = task_blocked_on_lock(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) raw_spin_unlock(&owner->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * Even if full deadlock detection is on, if the owner is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * blocked itself, we can avoid finding this out in the chain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * walk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (!chain_walk || !next_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * The owner can't disappear while holding a lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * so the owner struct is protected by wait_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * Gets dropped in rt_mutex_adjust_prio_chain()!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) get_task_struct(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) raw_spin_unlock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) next_lock, waiter, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) raw_spin_lock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * Remove the top waiter from the current task's pi waiter tree and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * queue it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * Called with lock->wait_lock held and interrupts disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct rt_mutex_waiter *waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) raw_spin_lock(&current->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) waiter = rt_mutex_top_waiter(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * Remove it from current->pi_waiters and deboost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * We must in fact deboost here in order to ensure we call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * rt_mutex_setprio() to update p->pi_top_task before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * task unblocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) rt_mutex_dequeue_pi(current, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) rt_mutex_adjust_prio(current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * As we are waking up the top waiter, and the waiter stays
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * queued on the lock until it gets the lock, this lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * obviously has waiters. Just set the bit here and this has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * the added benefit of forcing all new tasks into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * slow path making sure no task of lower priority than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * the top waiter can steal this lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * We deboosted before waking the top waiter task such that we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * run two tasks with the 'same' priority (and ensure the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * p->pi_top_task pointer points to a blocked task). This however can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * lead to priority inversion if we get preempted after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * deboost but before waking our donor task, hence the preempt_disable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * before unlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * Pairs with preempt_enable() in rt_mutex_postunlock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) wake_q_add(wake_q, waiter->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) raw_spin_unlock(&current->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
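^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * Illustrative unlock sequence for the contended case (a sketch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) * distilled from rt_mutex_slowunlock() and rt_mutex_postunlock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * below):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) *	DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) *	raw_spin_lock_irqsave(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) *	mark_wakeup_next_waiter(&wake_q, lock);		<- preempt_disable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *	rt_mutex_postunlock(&wake_q);	<- wake_up_q() + preempt_enable()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) */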
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * Remove a waiter from a lock and give up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) * Must be called with lock->wait_lock held and interrupts disabled. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * caller must have just failed to try_to_take_rt_mutex().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) static void remove_waiter(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct rt_mutex_waiter *waiter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct task_struct *owner = rt_mutex_owner(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct rt_mutex *next_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) lockdep_assert_held(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) raw_spin_lock(&current->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) rt_mutex_dequeue(lock, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) current->pi_blocked_on = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) raw_spin_unlock(&current->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) * Only update priority if the waiter was the highest priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) * waiter of the lock and there is an owner to update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (!owner || !is_top_waiter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) raw_spin_lock(&owner->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) rt_mutex_dequeue_pi(owner, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (rt_mutex_has_waiters(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) rt_mutex_adjust_prio(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* Store the lock on which owner is blocked or NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) next_lock = task_blocked_on_lock(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) raw_spin_unlock(&owner->pi_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * Don't walk the chain if the owner task is not blocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (!next_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) /* gets dropped in rt_mutex_adjust_prio_chain()! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) get_task_struct(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) raw_spin_unlock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) next_lock, NULL, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) raw_spin_lock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * Recheck the pi chain, in case we got a priority setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * Called from sched_setscheduler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) void rt_mutex_adjust_pi(struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct rt_mutex_waiter *waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct rt_mutex *next_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) raw_spin_lock_irqsave(&task->pi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) waiter = task->pi_blocked_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) raw_spin_unlock_irqrestore(&task->pi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) next_lock = waiter->lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) raw_spin_unlock_irqrestore(&task->pi_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) /* gets dropped in rt_mutex_adjust_prio_chain()! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) get_task_struct(task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) next_lock, NULL, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
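^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * Illustrative trigger (a sketch; the real call site lives in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * scheduler core): after a priority change such as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) *	struct sched_param sp = { .sched_priority = 80 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *	sched_setscheduler(task, SCHED_FIFO, &sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) * the scheduler ends up calling rt_mutex_adjust_pi(task), so a task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) * blocked on an rt_mutex propagates its new priority down the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * chain.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) */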
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) debug_rt_mutex_init_waiter(waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) RB_CLEAR_NODE(&waiter->pi_tree_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) RB_CLEAR_NODE(&waiter->tree_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) waiter->task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * @lock: the rt_mutex to take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * @state: the state the task should block in (TASK_INTERRUPTIBLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * or TASK_UNINTERRUPTIBLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * @timeout: the pre-initialized and started timer, or NULL for none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * @waiter: the pre-initialized rt_mutex_waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * Must be called with lock->wait_lock held and interrupts disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) __rt_mutex_slowlock(struct rt_mutex *lock, int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct hrtimer_sleeper *timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) struct rt_mutex_waiter *waiter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) trace_android_vh_rtmutex_wait_start(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) /* Try to acquire the lock: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) if (try_to_take_rt_mutex(lock, current, waiter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * TASK_INTERRUPTIBLE checks for signals and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * timeout. Ignored otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (likely(state == TASK_INTERRUPTIBLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* Signal pending? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (signal_pending(current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (timeout && !timeout->task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) ret = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) raw_spin_unlock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) debug_rt_mutex_print_deadlock(waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) raw_spin_lock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) set_current_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) trace_android_vh_rtmutex_wait_finish(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct rt_mutex_waiter *w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * If the result is not -EDEADLOCK or the caller requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * deadlock detection, nothing to do here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (res != -EDEADLOCK || detect_deadlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * Yell loudly and stop the task right here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) rt_mutex_print_deadlock(w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * Slow path lock function:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) static int __sched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) rt_mutex_slowlock(struct rt_mutex *lock, int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct hrtimer_sleeper *timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) enum rtmutex_chainwalk chwalk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) struct rt_mutex_waiter waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) rt_mutex_init_waiter(&waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * Technically we could use raw_spin_[un]lock_irq() here, but this can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * be called in early boot if the cmpxchg() fast path is disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * (debug, no architecture support). In this case we will acquire the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * rtmutex with lock->wait_lock held. But we cannot unconditionally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * enable interrupts in that early boot case. So we need to use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * irqsave/restore variants.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) raw_spin_lock_irqsave(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) /* Try to acquire the lock again: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (try_to_take_rt_mutex(lock, current, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) set_current_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* Set up the timer when timeout != NULL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (unlikely(timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (likely(!ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /* sleep on the mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) __set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) remove_waiter(lock, &waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) rt_mutex_handle_deadlock(ret, chwalk, &waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * try_to_take_rt_mutex() sets the waiter bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * unconditionally. We might have to fix that up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) fixup_rt_mutex_waiters(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /* Remove pending timer: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) if (unlikely(timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) hrtimer_cancel(&timeout->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) debug_rt_mutex_free_waiter(&waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) int ret = try_to_take_rt_mutex(lock, current, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * try_to_take_rt_mutex() sets the lock waiters bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * unconditionally. Clean this up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) fixup_rt_mutex_waiters(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * Slow path try-lock function:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * If the lock already has an owner we fail to get the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * This can be done without taking the @lock->wait_lock as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * it is only being read, and this is a trylock anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) if (rt_mutex_owner(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * The mutex has currently no owner. Lock the wait lock and try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * acquire the lock. We use irqsave here to support early boot calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) raw_spin_lock_irqsave(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ret = __rt_mutex_slowtrylock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) * Slow path to release a rt-mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * Return whether the current task needs to call rt_mutex_postunlock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct wake_q_head *wake_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /* irqsave required to support early boot calls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) raw_spin_lock_irqsave(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) debug_rt_mutex_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * We must be careful here if the fast path is enabled. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * have no waiters queued we cannot set owner to NULL here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * because of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * foo->lock->owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * rtmutex_lock(foo->lock); <- fast path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * free = atomic_dec_and_test(foo->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * rtmutex_unlock(foo->lock); <- fast path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * if (free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) * kfree(foo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * raw_spin_unlock(foo->lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * So for the fastpath enabled kernel:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * Nothing can set the waiters bit as long as we hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * lock->wait_lock. So we do the following sequence:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * owner = rt_mutex_owner(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * clear_rt_mutex_waiters(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * raw_spin_unlock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * if (cmpxchg(&lock->owner, owner, 0) == owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * The fastpath disabled variant is simple as all access to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * lock->owner is serialized by lock->wait_lock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * lock->owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * raw_spin_unlock(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) while (!rt_mutex_has_waiters(lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* Drops lock->wait_lock ! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (unlock_rt_mutex_safe(lock, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /* Relock the rtmutex and try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) raw_spin_lock_irqsave(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * The wakeup next waiter path does not suffer from the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * race. See the comments there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) * Queue the next waiter for wakeup once we release the wait_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) mark_wakeup_next_waiter(wake_q, lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) return true; /* call rt_mutex_postunlock() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * debug aware fast / slowpath lock, trylock and unlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * The atomic acquire/release ops are compiled away when either the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * architecture does not support cmpxchg or when debugging is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) rt_mutex_fastlock(struct rt_mutex *lock, int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int (*slowfn)(struct rt_mutex *lock, int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct hrtimer_sleeper *timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) enum rtmutex_chainwalk chwalk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct hrtimer_sleeper *timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) enum rtmutex_chainwalk chwalk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) int (*slowfn)(struct rt_mutex *lock, int state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct hrtimer_sleeper *timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) enum rtmutex_chainwalk chwalk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return slowfn(lock, state, timeout, chwalk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) rt_mutex_fasttrylock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) int (*slowfn)(struct rt_mutex *lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return slowfn(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * Performs the wakeup of the top-waiter and re-enables preemption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) void rt_mutex_postunlock(struct wake_q_head *wake_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) wake_up_q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) /* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) rt_mutex_fastunlock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) bool (*slowfn)(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct wake_q_head *wqh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) if (slowfn(lock, &wake_q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) rt_mutex_postunlock(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) * rt_mutex_lock_nested - lock a rt_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) * @lock: the rt_mutex to be locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) * @subclass: the lockdep subclass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) __rt_mutex_lock(lock, subclass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) #else /* !CONFIG_DEBUG_LOCK_ALLOC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * rt_mutex_lock - lock a rt_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * @lock: the rt_mutex to be locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) void __sched rt_mutex_lock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) __rt_mutex_lock(lock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) EXPORT_SYMBOL_GPL(rt_mutex_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) #endif
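^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * Example usage (illustrative; "foo" and its embedded rt_mutex are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * assumptions of this example, not part of this file):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *	rt_mutex_lock(&foo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) *	... critical section runs with priority inheritance active ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) *	rt_mutex_unlock(&foo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) */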
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * @lock: the rt_mutex to be locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * -EINTR when interrupted by a signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) mutex_release(&lock->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
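^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * A sketch of the expected caller pattern ("foo" is again an assumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * caller structure):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) *	ret = rt_mutex_lock_interruptible(&foo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) *	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) *		return ret;	<- can only be -EINTR here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) *	...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) *	rt_mutex_unlock(&foo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) */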
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * Futex variant, must not use fastpath.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return rt_mutex_slowtrylock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return __rt_mutex_slowtrylock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) * rt_mutex_timed_lock - lock a rt_mutex interruptible, the timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * structure is provided by the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * @lock: the rt_mutex to be locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * @timeout: timeout structure or NULL (no timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * -EINTR when interrupted by a signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * -ETIMEDOUT when the timeout expired
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) RT_MUTEX_MIN_CHAINWALK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) rt_mutex_slowlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) mutex_release(&lock->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
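^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * A sketch of how a caller could provide the timeout (hedged: the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * hrtimer sleeper helpers and the absolute expiry value "expiry" are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * assumptions of this example; the timer itself is started inside the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * slow path, the caller only initializes it and sets the expiry):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) *	struct hrtimer_sleeper to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) *	hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) *	hrtimer_set_expires(&to.timer, expiry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) *	ret = rt_mutex_timed_lock(lock, &to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) *	destroy_hrtimer_on_stack(&to.timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) */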
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * rt_mutex_trylock - try to lock a rt_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * @lock: the rt_mutex to be locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) * This function can only be called in thread context. It's safe to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) * call it from atomic regions, but not from hard interrupt or soft
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) * interrupt context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * Returns 1 on success and 0 on contention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) int __sched rt_mutex_trylock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) EXPORT_SYMBOL_GPL(rt_mutex_trylock);
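
/*
 * Example usage (a minimal sketch): opportunistic locking from a
 * context that must not sleep, e.g. with preemption disabled.
 * my_lock, do_protected_work() and defer_work() are hypothetical
 * names.
 *
 *	if (rt_mutex_trylock(&my_lock)) {
 *		do_protected_work();
 *		rt_mutex_unlock(&my_lock);
 *	} else {
 *		defer_work();
 *	}
 */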
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * rt_mutex_unlock - unlock a rt_mutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * @lock: the rt_mutex to be unlocked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) void __sched rt_mutex_unlock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) mutex_release(&lock->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) EXPORT_SYMBOL_GPL(rt_mutex_unlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
/*
 * Futex variant: since the futex variants do not use the fast path, this
 * can be simple and does not need to retry.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct wake_q_head *wake_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) lockdep_assert_held(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) debug_rt_mutex_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (!rt_mutex_has_waiters(lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) lock->owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return false; /* done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * leave preemption disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  The preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) mark_wakeup_next_waiter(wake_q, lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) return true; /* call postunlock() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) bool postunlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) raw_spin_lock_irqsave(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (postunlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) rt_mutex_postunlock(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
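
/*
 * A sketch of the split calling convention used by futex callers that
 * have to do more work (e.g. pi_state fixup, omitted here) under the
 * same wait_lock hold before issuing the wakeup; assumed caller-side
 * shape only:
 *
 *	DEFINE_WAKE_Q(wake_q);
 *	bool postunlock;
 *
 *	raw_spin_lock_irq(&lock->wait_lock);
 *	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
 *	raw_spin_unlock_irq(&lock->wait_lock);
 *
 *	if (postunlock)
 *		rt_mutex_postunlock(&wake_q);
 */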
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * rt_mutex_destroy - mark a mutex unusable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * @lock: the mutex to be destroyed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * This function marks the mutex uninitialized, and any subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * use of the mutex is forbidden. The mutex must not be locked when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * this function is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) void rt_mutex_destroy(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) WARN_ON(rt_mutex_is_locked(lock));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) #ifdef CONFIG_DEBUG_RT_MUTEXES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) lock->magic = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) EXPORT_SYMBOL_GPL(rt_mutex_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock: the rt_mutex to be initialized
 * @name: the lock name used for debugging
 * @key: the lock class key used for debugging
 *
 * Initialize the rt_mutex to unlocked state.
 *
 * Initializing of a locked rt_mutex is not allowed.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) void __rt_mutex_init(struct rt_mutex *lock, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) struct lock_class_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) lock->owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) raw_spin_lock_init(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) lock->waiters = RB_ROOT_CACHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) if (name && key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) debug_rt_mutex_init(lock, name, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) EXPORT_SYMBOL_GPL(__rt_mutex_init);
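
/*
 * Example usage (a minimal sketch): most users should go through the
 * rt_mutex_init() wrapper from <linux/rtmutex.h>, which supplies the
 * debug name and a static lock_class_key.  my_lock is a hypothetical
 * name.
 *
 *	static struct rt_mutex my_lock;
 *
 *	rt_mutex_init(&my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	(critical section)
 *	rt_mutex_unlock(&my_lock);
 */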
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) * proxy owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * @lock: the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) *
 * No locking. The caller has to serialize access itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * Special API call for PI-futex support. This initializes the rtmutex and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * possible at this point because the pi_state which contains the rtmutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * is not yet visible to other tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) struct task_struct *proxy_owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) __rt_mutex_init(lock, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) debug_rt_mutex_proxy_lock(lock, proxy_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) rt_mutex_set_owner(lock, proxy_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * rt_mutex_proxy_unlock - release a lock on behalf of owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) *
 * @lock: the rt_mutex to be unlocked
 *
 * No locking. The caller has to serialize access itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * Special API call for PI-futex support. This merrily cleans up the rtmutex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * (debugging) state. Concurrent operations on this rt_mutex are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) void rt_mutex_proxy_unlock(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) debug_rt_mutex_proxy_unlock(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) rt_mutex_set_owner(lock, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
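
/*
 * A sketch of how the two proxy calls bracket a pi_state's lifetime
 * in the futex code (reference counting and locking omitted;
 * pi_state/owner naming as assumed from there):
 *
 *	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, owner);
 *	...
 *	rt_mutex_proxy_unlock(&pi_state->pi_mutex);
 */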
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * @lock: the rt_mutex to take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * @waiter: the pre-initialized rt_mutex_waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * @task: the task to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) *
 * NOTE: does _NOT_ remove the @waiter on failure; the caller must either
 * call rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * 0 - task blocked on lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * 1 - acquired the lock for task, caller should wake it up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * <0 - error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * Special API call for PI-futex support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct rt_mutex_waiter *waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) lockdep_assert_held(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (try_to_take_rt_mutex(lock, task, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /* We enforce deadlock detection for futexes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) ret = task_blocks_on_rt_mutex(lock, waiter, task,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) RT_MUTEX_FULL_CHAINWALK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (ret && !rt_mutex_owner(lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * Reset the return value. We might have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * returned with -EDEADLK and the owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * released the lock while we were walking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * pi chain. Let the waiter sort it out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) debug_rt_mutex_print_deadlock(waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * @lock: the rt_mutex to take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * @waiter: the pre-initialized rt_mutex_waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * @task: the task to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) * on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * 0 - task blocked on lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * 1 - acquired the lock for task, caller should wake it up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * <0 - error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * Special API call for PI-futex support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct rt_mutex_waiter *waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) struct task_struct *task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) raw_spin_lock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) remove_waiter(lock, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) raw_spin_unlock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
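
/*
 * A sketch of the waker side, loosely modeled on the futex requeue-PI
 * path (futex bookkeeping omitted; the waiter side continues in
 * rt_mutex_wait_proxy_lock() below):
 *
 *	struct rt_mutex_waiter waiter;
 *	int ret;
 *
 *	rt_mutex_init_waiter(&waiter);
 *	ret = rt_mutex_start_proxy_lock(lock, &waiter, task);
 *	if (ret == 1) {
 *		(the lock was acquired for @task: wake it up and
 *		 report success)
 *	}
 */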
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * rt_mutex_next_owner - return the next owner of the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) *
 * @lock: the rt_mutex to query
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * Returns the next owner of the lock or NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * Caller has to serialize against other accessors to the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * Special API call for PI-futex support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (!rt_mutex_has_waiters(lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) return rt_mutex_top_waiter(lock)->task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * @lock: the rt_mutex we were woken on
 * @to: the timeout, NULL if none. The hrtimer should already have
 * been started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * @waiter: the pre-initialized rt_mutex_waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * Wait for the lock acquisition started on our behalf by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * rt_mutex_cleanup_proxy_lock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * 0 - success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * <0 - error, one of -EINTR, -ETIMEDOUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * Special API call for PI-futex support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) struct hrtimer_sleeper *to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) struct rt_mutex_waiter *waiter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) raw_spin_lock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /* sleep on the mutex */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * have to fix that up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) fixup_rt_mutex_waiters(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) raw_spin_unlock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * @lock: the rt_mutex we were woken on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * @waiter: the pre-initialized rt_mutex_waiter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * rt_mutex_wait_proxy_lock().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) *
 * Unless we acquired the lock, we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned;
 *          the caller should disregard its return value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * Special API call for PI-futex support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct rt_mutex_waiter *waiter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) bool cleanup = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) raw_spin_lock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * Do an unconditional try-lock, this deals with the lock stealing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) * sets a NULL owner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * We're not interested in the return value, because the subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * test on rt_mutex_owner() will infer that. If the trylock succeeded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * we will own the lock and it will have removed the waiter. If we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * failed the trylock, we're still not owner and we need to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) try_to_take_rt_mutex(lock, current, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) /*
	 * Unless we're the owner, we're still enqueued on the wait_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) * So check if we became owner, if not, take us off the wait_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (rt_mutex_owner(lock) != current) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) remove_waiter(lock, waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) cleanup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * have to fix that up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) fixup_rt_mutex_waiters(lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) raw_spin_unlock_irq(&lock->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) return cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) }
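
/*
 * A sketch of the waiter side completing the proxy acquisition,
 * loosely modeled on the futex code (futex state locking omitted):
 * on failure, the trylock inside rt_mutex_cleanup_proxy_lock()
 * decides whether the error can stand.
 *
 *	ret = rt_mutex_wait_proxy_lock(lock, to, &waiter);
 *	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &waiter))
 *		ret = 0;	(we became the owner after all)
 */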