/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>

/*
 * This is the control structure for tasks blocked on an rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree_entry:		pi node to enqueue into the mutex waiters tree
 * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @prio:		Priority of the waiter at the time of enqueueing
 * @deadline:		Deadline of the waiter, if applicable
 */
struct rt_mutex_waiter {
	struct rb_node		tree_entry;
	struct rb_node		pi_tree_entry;
	struct task_struct	*task;
	struct rt_mutex		*lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
	unsigned long		ip;
	struct pid		*deadlock_task_pid;
	struct rt_mutex		*deadlock_lock;
#endif
	int			prio;
	u64			deadline;
};
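
/*
 * Illustrative note, not part of the original comment: the waiter is meant to
 * live on the blocked task's kernel stack for the duration of the sleep, e.g.
 *
 *	struct rt_mutex_waiter waiter;
 *
 *	rt_mutex_init_waiter(&waiter);
 *	// ... block on the lock; &waiter is enqueued into lock->waiters and,
 *	// while it is the top waiter, into the owner's pi_waiters tree ...
 *
 * rt_mutex_init_waiter() is declared further down in this header.
 */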

/*
 * Various helpers to access the waiters-tree:
 */

#ifdef CONFIG_RT_MUTEXES

static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
	return rb_entry(p->pi_waiters.rb_leftmost,
			struct rt_mutex_waiter, pi_tree_entry);
}

#else

static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
	return false;
}

static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
	return NULL;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return false;
}

static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
	return NULL;
}

#endif
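
/*
 * Illustrative sketch, not part of the rtmutex API: the helper name below is
 * hypothetical. It shows a typical use of the accessors above, namely checking
 * whether a given waiter currently sits at the top of the lock's waiters tree,
 * which is the case that requires boosting the lock owner.
 */
static inline bool
rt_mutex_example_is_top_waiter(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	return rt_mutex_has_waiters(lock) && rt_mutex_top_waiter(lock) == waiter;
}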

/*
 * lock->owner state tracking:
 */
#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
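
/*
 * Illustrative sketch, not part of the rtmutex API: the helper name below is
 * hypothetical. Since the lowest bit of lock->owner carries the
 * RT_MUTEX_HAS_WAITERS flag, the raw value must be masked before it is used
 * as a task pointer (as rt_mutex_owner() does above); this helper only shows
 * how the waiter bit itself could be inspected.
 */
static inline bool rt_mutex_example_owner_has_waiters_bit(struct rt_mutex *lock)
{
	return ((unsigned long) READ_ONCE(lock->owner)) & RT_MUTEX_HAS_WAITERS;
}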

/*
 * Constants for rt mutex functions which have selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};
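
/*
 * Illustrative sketch, not part of the rtmutex API: the helper name below is
 * hypothetical. Callers that do not explicitly ask for deadlock detection are
 * assumed to pass RT_MUTEX_MIN_CHAINWALK so the PI chain walk can stop as soon
 * as no further priority adjustments are needed; an explicit detection request
 * escalates to RT_MUTEX_FULL_CHAINWALK.
 */
static inline enum rtmutex_chainwalk
rt_mutex_example_chainwalk(bool detect_deadlock)
{
	return detect_deadlock ? RT_MUTEX_FULL_CHAINWALK : RT_MUTEX_MIN_CHAINWALK;
}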

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
					struct rt_mutex_waiter *waiter);
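
/*
 * Illustrative sketch, not part of the rtmutex API: the helper name below is
 * hypothetical and the return-value handling is only schematic. It strings the
 * proxy operations declared above together the way a PI-futex style caller
 * would (in the real futex code these steps are split across the requeueing
 * task and the waiting task): enqueue @waiter on behalf of @task, block until
 * the lock is acquired or the optional timeout @to fires, and clean up the
 * waiter if the lock was not acquired after all.
 */
static inline int rt_mutex_example_proxy_sequence(struct rt_mutex *lock,
						  struct rt_mutex_waiter *waiter,
						  struct task_struct *task,
						  struct hrtimer_sleeper *to)
{
	int ret;

	rt_mutex_init_waiter(waiter);

	ret = rt_mutex_start_proxy_lock(lock, waiter, task);
	if (ret)
		return ret < 0 ? ret : 0;	/* error, or lock acquired right away */

	/* Block until @task owns the lock, or a timeout/signal ends the wait */
	ret = rt_mutex_wait_proxy_lock(lock, to, waiter);
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, waiter))
		ret = 0;			/* became the owner despite the error */

	return ret;
}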

extern int rt_mutex_futex_trylock(struct rt_mutex *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex *l);

extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
				    struct wake_q_head *wqh);

extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
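
/*
 * Illustrative sketch, not part of the rtmutex API: the helper name below is
 * hypothetical and it assumes the unlock is serialized by lock->wait_lock,
 * roughly mirroring the futex unlock path: the wakeup is collected into a
 * wake_q under lock->wait_lock by __rt_mutex_futex_unlock(), and the actual
 * wakeup is performed afterwards, outside of wait_lock, by
 * rt_mutex_postunlock().
 */
static inline void rt_mutex_example_futex_unlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);
	bool postunlock;

	raw_spin_lock_irq(&lock->wait_lock);
	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irq(&lock->wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);
}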

#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
#else
# include "rtmutex.h"
#endif

#endif /* __KERNEL_RTMUTEX_COMMON_H */