// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <sysexits.h>
#include <unistd.h>
#include "include/liblockdep/mutex.h"
#include "../../include/linux/rbtree.h"

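/*
 * liblockdep: an LD_PRELOAD shim that wraps the pthread mutex and rwlock
 * APIs and mirrors every operation into the kernel's lockdep engine built
 * for userspace, so lock-ordering bugs are reported at runtime. A typical
 * invocation (a sketch, assuming the library was built as liblockdep.so):
 *
 *	LD_PRELOAD=./liblockdep.so ./your-program
 */
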
/**
 * struct lock_lookup - liblockdep's view of a single unique lock
 * @orig: pointer to the original pthread lock, used for lookups
 * @dep_map: lockdep's dep_map structure
 * @key: lockdep's key structure
 * @node: rb-tree node used to store the lock in a global tree
 * @name: a unique name for the lock
 */
struct lock_lookup {
	void *orig; /* Original pthread lock, used for lookups */
	struct lockdep_map dep_map; /* Since all locks are dynamic, we need
				     * a dep_map and a key for each lock */
	/*
	 * Wait, there's no support for key classes? Yup :(
	 * Most big projects wrap the pthread API with their own calls to
	 * be compatible with different locking methods. This means that
	 * "classes" will be broken, since the function that creates all
	 * locks will point to a generic locking function instead of the
	 * actual code that wants to do the locking.
	 */
	struct lock_class_key key;
	struct rb_node node;
#define LIBLOCKDEP_MAX_LOCK_NAME 22
	char name[LIBLOCKDEP_MAX_LOCK_NAME];
};

/* This is where we store our locks */
static struct rb_root locks = RB_ROOT;
static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER;

/* pthread mutex API */

#ifdef __GLIBC__
extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
#else
#define __pthread_mutex_init NULL
#define __pthread_mutex_lock NULL
#define __pthread_mutex_trylock NULL
#define __pthread_mutex_unlock NULL
#define __pthread_mutex_destroy NULL
#endif
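
/*
 * The ll_ ("liblockdep") pointers below hold the real pthread entry points:
 * on glibc they start out at the internal __pthread_* aliases; elsewhere
 * they are resolved with dlsym(RTLD_NEXT, ...) in init_preload().
 */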
static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr) = __pthread_mutex_init;
static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock;
static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock;
static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock;
static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy;

/* pthread rwlock API */

#ifdef __GLIBC__
extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
#else
#define __pthread_rwlock_init NULL
#define __pthread_rwlock_destroy NULL
#define __pthread_rwlock_wrlock NULL
#define __pthread_rwlock_trywrlock NULL
#define __pthread_rwlock_rdlock NULL
#define __pthread_rwlock_tryrdlock NULL
#define __pthread_rwlock_unlock NULL
#endif

static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock,
			const pthread_rwlockattr_t *attr) = __pthread_rwlock_init;
static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock) = __pthread_rwlock_destroy;
static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_rdlock;
static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_tryrdlock;
static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_trywrlock;
static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_wrlock;
static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_unlock;

enum { none, prepare, done, } __init_state;
static void init_preload(void);
static void try_init_preload(void)
{
	if (__init_state != done)
		init_preload();
}

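/*
 * __get_lock_node - find the rb-tree slot for @lock
 *
 * Returns the link where @lock lives, or the link where it should be
 * inserted if it isn't in the tree yet; *@parent is set up for a later
 * rb_link_node() call. The caller must hold locks_rwlock.
 */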
static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
{
	struct rb_node **node = &locks.rb_node;
	struct lock_lookup *l;

	*parent = NULL;

	while (*node) {
		l = rb_entry(*node, struct lock_lookup, node);

		*parent = *node;
		if (lock < l->orig)
			node = &l->node.rb_left;
		else if (lock > l->orig)
			node = &l->node.rb_right;
		else
			return node;
	}

	return node;
}

#ifndef LIBLOCKDEP_STATIC_ENTRIES
#define LIBLOCKDEP_STATIC_ENTRIES 1024
#endif

static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
static int __locks_nr;

static inline bool is_static_lock(struct lock_lookup *lock)
{
	return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks);
}

static struct lock_lookup *alloc_lock(void)
{
	if (__init_state != done) {
		/*
		 * Some programs attempt to initialize and use locks in their
		 * allocation path. This means that a call to malloc() would
		 * result in locks being initialized and locked.
		 *
		 * Why is that an issue for us? dlsym() below will try to
		 * allocate memory to give us the original function. Since
		 * this allocation will result in a locking operation, we
		 * have to let pthread deal with it, but we can't: we don't
		 * have the pointer to the original API since we're inside
		 * dlsym() trying to get it.
		 */

		int idx = __locks_nr++;
		if (idx >= ARRAY_SIZE(__locks)) {
			dprintf(STDERR_FILENO,
				"LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
			exit(EX_UNAVAILABLE);
		}
		return __locks + idx;
	}

	return malloc(sizeof(struct lock_lookup));
}

static inline void free_lock(struct lock_lookup *lock)
{
	if (likely(!is_static_lock(lock)))
		free(lock);
}

/**
 * __get_lock - find or create a lock instance
 * @lock: pointer to a pthread lock object
 *
 * Try to find an existing lock in the rbtree using the provided pointer. If
 * one isn't found, create it.
 */
static struct lock_lookup *__get_lock(void *lock)
{
	struct rb_node **node, *parent;
	struct lock_lookup *l;

	ll_pthread_rwlock_rdlock(&locks_rwlock);
	node = __get_lock_node(lock, &parent);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	if (*node)
		return rb_entry(*node, struct lock_lookup, node);

	/* We didn't find the lock, let's create it */
	l = alloc_lock();
	if (l == NULL)
		return NULL;

	l->orig = lock;
	/*
	 * Currently the name of the lock is the pointer value of the pthread
	 * lock; while not optimal, it makes debugging a bit easier.
	 *
	 * TODO: Get the real name of the lock using libdwarf
	 */
	sprintf(l->name, "%p", lock);
	lockdep_init_map(&l->dep_map, l->name, &l->key, 0);

	ll_pthread_rwlock_wrlock(&locks_rwlock);
	/* This might have changed since the last time we fetched it */
	node = __get_lock_node(lock, &parent);
	rb_link_node(&l->node, parent, node);
	rb_insert_color(&l->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);

	return l;
}

static void __del_lock(struct lock_lookup *lock)
{
	ll_pthread_rwlock_wrlock(&locks_rwlock);
	rb_erase(&lock->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	free_lock(lock);
}

int pthread_mutex_init(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr)
{
	int r;

	/*
	 * We keep trying to init our preload module because there might be
	 * code in init sections that tries to touch locks before we are
	 * initialized; in that case we'll need to manually call preload
	 * to get us going.
	 *
	 * Funnily enough, the kernel's lockdep had the same issue, and used
	 * (almost) the same solution. See look_up_lock_class() in
	 * kernel/locking/lockdep.c for details.
	 */
	try_init_preload();

	r = ll_pthread_mutex_init(mutex, attr);
	if (r == 0)
		/*
		 * We do a dummy initialization here so that lockdep could
		 * warn us if something fishy is going on - such as
		 * initializing a held lock.
		 */
		__get_lock(mutex);

	return r;
}

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

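	/*
	 * lock_acquire() takes (map, subclass, trylock, read, check,
	 * nest_lock, ip), as in the kernel's lockdep API. Here trylock=0
	 * (this call may block) and read=0 (exclusive acquisition).
	 */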
	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
			(unsigned long)_RET_IP_);
	/*
	 * Here's the thing with pthread mutexes: unlike the kernel variant,
	 * they can fail.
	 *
	 * This means that the behaviour here is a bit different from what's
	 * going on in the kernel: there we just tell lockdep that we took the
	 * lock before actually taking it, but here we must deal with the case
	 * that locking failed.
	 *
	 * To do that we'll "release" the lock if locking failed - this way
	 * we'll get lockdep doing the correct checks when we try to take
	 * the lock, and if that fails - we'll be back to the correct
	 * state by releasing it.
	 */
	r = ll_pthread_mutex_lock(mutex);
	if (r)
		lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

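	/* trylock=1: a failed attempt must not be reported as a deadlock */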
	lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_mutex_trylock(mutex);
	if (r)
		lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

	lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
	/*
	 * Just like taking a lock, only in reverse!
	 *
	 * If we fail releasing the lock, tell lockdep we're holding it again.
	 */
	r = ll_pthread_mutex_unlock(mutex);
	if (r)
		lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	try_init_preload();

	/*
	 * Let's see if we're releasing a lock that's held.
	 *
	 * TODO: Hook into free() and add that check there as well.
	 */
	debug_check_no_locks_freed(mutex, sizeof(*mutex));
	__del_lock(__get_lock(mutex));
	return ll_pthread_mutex_destroy(mutex);
}

/* This is the rwlock part, very similar to the mutex part above */
int pthread_rwlock_init(pthread_rwlock_t *rwlock,
			const pthread_rwlockattr_t *attr)
{
	int r;

	try_init_preload();

	r = ll_pthread_rwlock_init(rwlock, attr);
	if (r == 0)
		__get_lock(rwlock);

	return r;
}

int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	try_init_preload();

	debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
	__del_lock(__get_lock(rwlock));
	return ll_pthread_rwlock_destroy(rwlock);
}

int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

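	/* read=2: recursive read; rdlock may be taken again by the same thread */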
	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_rdlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_tryrdlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

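	/* trylock=1, read=0: non-blocking attempt at an exclusive (write) lock */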
	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_trywrlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_wrlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_unlock(rwlock);
	if (r)
		lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);

	return r;
}

__attribute__((constructor)) static void init_preload(void)
{
	if (__init_state == done)
		return;

#ifndef __GLIBC__
	__init_state = prepare;

	ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init");
	ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
	ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
	ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
	ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy");

	ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init");
	ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy");
	ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock");
	ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock");
	ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock");
	ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock");
	ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
#endif

	__init_state = done;
}