Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/* kernel/rwsem.c: R/W semaphores, public implementation
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 *
 * Rwsem count bit fields re-definition and rwsem rearchitecture by
 * Waiman Long <longman@redhat.com> and
 * Peter Zijlstra <peterz@infradead.org>.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#include "lock_events.h"
#include <trace/hooks/rwsem.h>
#include <trace/hooks/dtask.h>

/*
 * The least significant 3 bits of the owner value have the following
 * meanings when set.
 *  - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
 *  - Bit 1: RWSEM_RD_NONSPINNABLE - Readers cannot spin on this lock.
 *  - Bit 2: RWSEM_WR_NONSPINNABLE - Writers cannot spin on this lock.
 *
 * When the rwsem is either owned by an anonymous writer, or it is
 * reader-owned, but a spinning writer has timed out, both nonspinnable
 * bits will be set to disable optimistic spinning by readers and writers.
 * In the latter case, the last unlocking reader should then check the
 * writer nonspinnable bit and clear it only to give writers preference
 * to acquire the lock via optimistic spinning, but not readers. Similar
 * action is also done in the reader slowpath.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with the RWSEM_READER_OWNED bit set.
 * On unlock, the owner field will largely be left untouched. So
 * for a free or reader-owned rwsem, the owner value may contain
 * information about the last reader that acquired the rwsem.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 *
 * Reader optimistic spinning is helpful when the reader critical section
 * is short and there aren't that many readers around. It makes readers
 * relatively more preferred than writers. When a writer times out spinning
 * on a reader-owned lock and sets the nonspinnable bits, there are two main
 * reasons for that.
 *
 *  1) The reader critical section is long, perhaps the task sleeps after
 *     acquiring the read lock.
 *  2) There are just too many readers contending for the lock, causing it
 *     to take a while to service all of them.
 *
 * In the former case, a long reader critical section will impede the
 * progress of writers, which is usually more important for system
 * performance. In the latter case, reader optimistic spinning tends to
 * make the reader groups that contain readers that acquire the lock
 * together smaller, leading to more of them. That may hurt performance in
 * some cases. In other words, the setting of nonspinnable bits indicates
 * that reader optimistic spinning may not be helpful for those workloads
 * that cause it.
 *
 * Therefore, any writers that have observed the setting of the writer
 * nonspinnable bit for a given rwsem after they fail to acquire the lock
 * via optimistic spinning will set the reader nonspinnable bit once they
 * acquire the write lock. Similarly, readers that observe the setting
 * of the reader nonspinnable bit at slowpath entry will set the reader
 * nonspinnable bits when they acquire the read lock via the wakeup path.
 *
 * Once the reader nonspinnable bit is on, it will only be reset when
 * a writer is able to acquire the rwsem in the fast path or somehow a
 * reader or writer in the slowpath doesn't observe the nonspinnable bit.
 *
 * This is to discourage reader optimistic spinning on that particular
 * rwsem and make writers more preferred. This adaptive disabling of reader
 * optimistic spinning will alleviate the negative side effect of this
 * feature.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_RD_NONSPINNABLE	(1UL << 1)
#define RWSEM_WR_NONSPINNABLE	(1UL << 2)
#define RWSEM_NONSPINNABLE	(RWSEM_RD_NONSPINNABLE | RWSEM_WR_NONSPINNABLE)
#define RWSEM_OWNER_FLAGS_MASK	(RWSEM_READER_OWNED | RWSEM_NONSPINNABLE)
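
/*
 * Illustrative sketch, not part of the original file: how a task_struct
 * pointer and the flag bits above share the owner word. task_struct
 * pointers are at least 8-byte aligned, so the low 3 bits are always free
 * for the flags. The helper name below is hypothetical and is only added
 * here for explanation.
 */
static inline unsigned long rwsem_owner_word_sketch(struct task_struct *tsk,
						    unsigned long flags)
{
	/* Pack: pointer in the high bits, flag bits 0-2 in the low bits. */
	return (unsigned long)tsk | (flags & RWSEM_OWNER_FLAGS_MASK);
}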

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (!debug_locks_silent &&				\
	    WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(unsigned long) sem->magic,			\
		atomic_long_read(&(sem)->owner), (long)current,	\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * On 64-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-62 - 55-bit reader count
 * Bit  63   - read fail bit
 *
 * On 32-bit architectures, the bit definitions of the count are:
 *
 * Bit  0    - writer locked bit
 * Bit  1    - waiters present bit
 * Bit  2    - lock handoff bit
 * Bits 3-7  - reserved
 * Bits 8-30 - 23-bit reader count
 * Bit  31   - read fail bit
 *
 * It is not likely that the most significant bit (read fail bit) will ever
 * be set. This guard bit is still checked anyway in the down_read() fastpath
 * just in case we need to use up more of the reader bits for other purposes
 * in the future.
 *
 * atomic_long_fetch_add() is used to obtain the reader lock, whereas
 * atomic_long_cmpxchg() will be used to obtain the writer lock.
 *
 * There are three places where the lock handoff bit may be set or cleared.
 * 1) rwsem_mark_wake() for readers.
 * 2) rwsem_try_write_lock() for writers.
 * 3) Error path of rwsem_down_write_slowpath().
 *
 * For all the above cases, wait_lock will be held. A writer must also
 * be the first one in the wait_list to be eligible for setting the handoff
 * bit. So concurrent setting/clearing of handoff bit is not possible.
 */
#define RWSEM_WRITER_LOCKED	(1UL << 0)
#define RWSEM_FLAG_WAITERS	(1UL << 1)
#define RWSEM_FLAG_HANDOFF	(1UL << 2)
#define RWSEM_FLAG_READFAIL	(1UL << (BITS_PER_LONG - 1))

#define RWSEM_READER_SHIFT	8
#define RWSEM_READER_BIAS	(1UL << RWSEM_READER_SHIFT)
#define RWSEM_READER_MASK	(~(RWSEM_READER_BIAS - 1))
#define RWSEM_WRITER_MASK	RWSEM_WRITER_LOCKED
#define RWSEM_LOCK_MASK		(RWSEM_WRITER_MASK|RWSEM_READER_MASK)
#define RWSEM_READ_FAILED_MASK	(RWSEM_WRITER_MASK|RWSEM_FLAG_WAITERS|\
				 RWSEM_FLAG_HANDOFF|RWSEM_FLAG_READFAIL)
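
/*
 * Illustrative sketch, not part of the original file: decoding a count
 * word with the definitions above. Each reader adds RWSEM_READER_BIAS
 * (1 << 8), so the number of active readers lives in the bits selected by
 * RWSEM_READER_MASK. The helper name is hypothetical and assumes the read
 * fail guard bit is clear, as it normally is.
 */
static inline long rwsem_count_readers_sketch(long count)
{
	if (count & RWSEM_WRITER_MASK)
		return 0;	/* write-locked, no reader count to report */
	return (count & RWSEM_READER_MASK) >> RWSEM_READER_SHIFT;
}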

/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without lock. Read from owner, however,
 * may not need READ_ONCE() as long as the pointer value is only used
 * for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, (long)current);
	trace_android_vh_rwsem_set_owner(sem);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	atomic_long_set(&sem->owner, 0);
}

/*
 * Test the flags in the owner field.
 */
static inline bool rwsem_test_oflags(struct rw_semaphore *sem, long flags)
{
	return atomic_long_read(&sem->owner) & flags;
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 *
 * The reader non-spinnable bit is preserved.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED |
		(atomic_long_read(&sem->owner) & RWSEM_RD_NONSPINNABLE);

	atomic_long_set(&sem->owner, val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
	trace_android_vh_rwsem_set_reader_owned(sem);
}

/*
 * Return true if the rwsem is owned by a reader.
 */
static inline bool is_rwsem_reader_owned(struct rw_semaphore *sem)
{
#ifdef CONFIG_DEBUG_RWSEMS
	/*
	 * Check the count to see if it is write-locked.
	 */
	long count = atomic_long_read(&sem->count);

	if (count & RWSEM_WRITER_MASK)
		return false;
#endif
	return rwsem_test_oflags(sem, RWSEM_READER_OWNED);
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, it will make sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it will
 * be the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
 */
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = atomic_long_read(&sem->owner);

	while ((val & ~RWSEM_OWNER_FLAGS_MASK) == (unsigned long)current) {
		if (atomic_long_try_cmpxchg(&sem->owner, &val,
					    val & RWSEM_OWNER_FLAGS_MASK))
			return;
	}
}
#else
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

/*
 * Set the RWSEM_NONSPINNABLE bits if the RWSEM_READER_OWNED flag
 * remains set. Otherwise, the operation will be aborted.
 */
static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	do {
		if (!(owner & RWSEM_READER_OWNED))
			break;
		if (owner & RWSEM_NONSPINNABLE)
			break;
	} while (!atomic_long_try_cmpxchg(&sem->owner, &owner,
					  owner | RWSEM_NONSPINNABLE));
}

static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
{
	long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
	if (WARN_ON_ONCE(cnt < 0))
		rwsem_set_nonspinnable(sem);
	return !(cnt & RWSEM_READ_FAILED_MASK);
}
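
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * down_read() fast path uses rwsem_read_trylock(). The reader optimistically
 * adds its bias; if any of the writer/waiters/handoff/read-fail bits were
 * set, it would fall back to the reader slow path (rwsem_down_read_slowpath()
 * appears later in this file). The function name below is hypothetical and
 * only loosely mirrors the real caller.
 */
static inline void down_read_fastpath_sketch(struct rw_semaphore *sem)
{
	if (rwsem_read_trylock(sem))
		rwsem_set_reader_owned(sem);	/* got the lock uncontended */
	/* else: would enter the reader slow path and possibly sleep */
}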

/*
 * Return just the real task structure pointer of the owner
 */
static inline struct task_struct *rwsem_owner(struct rw_semaphore *sem)
{
	return (struct task_struct *)
		(atomic_long_read(&sem->owner) & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Return the real task structure pointer of the owner and the embedded
 * flags in the owner. pflags must be non-NULL.
 */
static inline struct task_struct *
rwsem_owner_flags(struct rw_semaphore *sem, unsigned long *pflags)
{
	unsigned long owner = atomic_long_read(&sem->owner);

	*pflags = owner & RWSEM_OWNER_FLAGS_MASK;
	return (struct task_struct *)(owner & ~RWSEM_OWNER_FLAGS_MASK);
}

/*
 * Guide to the rw_semaphore's count field.
 *
 * When the RWSEM_WRITER_LOCKED bit in count is set, the lock is owned
 * by a writer.
 *
 * The lock is owned by readers when
 * (1) the RWSEM_WRITER_LOCKED isn't set in count,
 * (2) some of the reader bits are set in count, and
 * (3) the owner field has the RWSEM_READER_OWNED bit set.
 *
 * Having some reader bits set is not enough to guarantee a reader-owned
 * lock as the readers may be in the process of backing out from the count
 * and a writer has just released the lock. So another writer may steal
 * the lock immediately after that.
 */
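
/*
 * Illustrative summary of the guide above (added note, not in the original
 * file):
 *
 *   count & RWSEM_WRITER_LOCKED                    -> writer-owned
 *   (count & RWSEM_READER_MASK) != 0 &&
 *     (owner & RWSEM_READER_OWNED)                 -> reader-owned
 *   anything else                                  -> free, or readers/
 *                                                     writers in transition
 */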

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
#ifdef CONFIG_DEBUG_RWSEMS
	sem->magic = sem;
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	atomic_long_set(&sem->owner, 0L);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
#endif
	trace_android_vh_rwsem_init(sem);
}
EXPORT_SYMBOL(__init_rwsem);

#define rwsem_first_waiter(sem) \
	list_first_entry(&sem->wait_list, struct rwsem_waiter, list)

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

enum writer_wait_state {
	WRITER_NOT_FIRST,	/* Writer is not first in wait list */
	WRITER_FIRST,		/* Writer is first in wait list     */
	WRITER_HANDOFF		/* Writer is first & handoff needed */
};

/*
 * The typical HZ value is either 250 or 1000. So set the minimum time a
 * waiter must spend in the wait queue to 4 ms, or 1 jiffy if that is
 * longer than 4 ms, before it initiates the handoff protocol.
 */
#define RWSEM_WAIT_TIMEOUT	DIV_ROUND_UP(HZ, 250)
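
/*
 * Worked examples (added for clarity, not in the original file):
 *   HZ = 1000: DIV_ROUND_UP(1000, 250) = 4 jiffies = 4 ms
 *   HZ =  250: DIV_ROUND_UP(250, 250)  = 1 jiffy   = 4 ms
 *   HZ =  100: DIV_ROUND_UP(100, 250)  = 1 jiffy   = 10 ms (1 jiffy > 4 ms)
 */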

/*
 * Magic number to batch-wakeup waiting readers, even when writers are
 * also present in the queue. This both limits the amount of work the
 * waking thread must do and also prevents any potential counter overflow,
 * however unlikely.
 */
#define MAX_READERS_WAKEUP	0x100
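/* 0x100 = 256 readers woken at most per call (clarifying note, not in the original). */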

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then the RWSEM_FLAG_WAITERS bit must
 *   have been set.
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup; the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having their
 *   task field zeroed
 * - writers are only marked woken if downgrading is false
 */
static void rwsem_mark_wake(struct rw_semaphore *sem,
			    enum rwsem_wake_type wake_type,
			    struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;
	struct list_head wlist;

	lockdep_assert_held(&sem->wait_lock);

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = rwsem_first_waiter(sem);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark writer at the front of the queue for wakeup.
			 * Until the task is actually awoken later by the
			 * caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
			lockevent_inc(rwsem_wake_writer);
		}

		return;
	}

	/*
	 * No reader wakeup if there are too many of them already.
	 */
	if (unlikely(atomic_long_read(&sem->count) < 0))
		return;

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		struct task_struct *owner;

		adjustment = RWSEM_READER_BIAS;
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount & RWSEM_WRITER_MASK)) {
			/*
			 * When we've been waiting "too" long (for writers
			 * to give up the lock), request a HANDOFF to
			 * force the issue.
			 */
			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
			    time_after(jiffies, waiter->timeout)) {
				adjustment -= RWSEM_FLAG_HANDOFF;
				lockevent_inc(rwsem_rlock_handoff);
			}

			atomic_long_add(-adjustment, &sem->count);
			return;
		}
		/*
		 * Set it to reader-owned to give spinners an early
		 * indication that readers now have the lock.
		 * The reader nonspinnable bit seen at slowpath entry of
		 * the reader is copied over.
		 */
		owner = waiter->task;
		if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) {
			owner = (void *)((unsigned long)owner | RWSEM_RD_NONSPINNABLE);
			lockevent_inc(rwsem_opt_norspin);
		}
		__rwsem_set_reader_owned(sem, owner);
	}

	/*
	 * Grant up to MAX_READERS_WAKEUP read locks to all the readers in the
	 * queue. We know that woken will be at least 1 as we accounted
	 * for above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 *
	 * This is an adaptation of the phase-fair R/W locks where at the
	 * reader phase (first waiter is a reader), all readers are eligible
	 * to acquire the lock at the same time irrespective of their order
	 * in the queue. The writers acquire the lock according to their
	 * order in the queue.
	 *
	 * We have to do wakeup in 2 passes to prevent the possibility that
	 * the reader count may be decremented before it is incremented. This
	 * is because the to-be-woken waiter may not have slept yet. So it
	 * may see waiter->task cleared, finish its critical section and
	 * do an unlock before the reader count increment.
	 *
	 * 1) Collect the read-waiters in a separate list, count them and
	 *    fully increment the reader count in rwsem.
	 * 2) For each waiter in the new list, clear waiter->task and
	 *    put them into wake_q to be woken up later.
	 */
	INIT_LIST_HEAD(&wlist);
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			continue;

		woken++;
		list_move_tail(&waiter->list, &wlist);

		trace_android_vh_rwsem_mark_wake_readers(sem, waiter);
		/*
		 * Limit # of readers that can be woken up per wakeup call.
		 */
		if (woken >= MAX_READERS_WAKEUP)
			break;
	}

	adjustment = woken * RWSEM_READER_BIAS - adjustment;
	lockevent_cond_inc(rwsem_wake_reader, woken);
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_FLAG_WAITERS;
	}

	/*
	 * When we've woken a reader, we no longer need to force writers
	 * to give up the lock and we can clear HANDOFF.
	 */
	if (woken && (atomic_long_read(&sem->count) & RWSEM_FLAG_HANDOFF))
		adjustment -= RWSEM_FLAG_HANDOFF;

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);

	/* 2nd pass */
	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
		struct task_struct *tsk;

		tsk = waiter->task;
		get_task_struct(tsk);

		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_slowpath() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wakeup.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}
}

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 *
 * If wstate is WRITER_HANDOFF, it will make sure that either the handoff
 * bit is set or the lock is acquired with handoff bit cleared.
 */
static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
					enum writer_wait_state wstate)
{
	long count, new;

	lockdep_assert_held(&sem->wait_lock);

	count = atomic_long_read(&sem->count);
	do {
		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);

		if (has_handoff && wstate == WRITER_NOT_FIRST)
			return false;

		new = count;

		if (count & RWSEM_LOCK_MASK) {
			if (has_handoff || (wstate != WRITER_HANDOFF))
				return false;

			new |= RWSEM_FLAG_HANDOFF;
		} else {
			new |= RWSEM_WRITER_LOCKED;
			new &= ~RWSEM_FLAG_HANDOFF;

			if (list_is_singular(&sem->wait_list))
				new &= ~RWSEM_FLAG_WAITERS;
		}
	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));

	/*
	 * We have either acquired the lock with handoff bit cleared or
	 * set the handoff bit.
	 */
	if (new & RWSEM_FLAG_HANDOFF)
		return false;

	rwsem_set_owner(sem);
	return true;
}

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire read lock before the reader is put on wait queue.
 * Lock acquisition isn't allowed if the rwsem is locked or a writer handoff
 * is ongoing.
 */
static inline bool rwsem_try_read_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	if (count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))
		return false;

	count = atomic_long_fetch_add_acquire(RWSEM_READER_BIAS, &sem->count);
	if (!(count & (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
		rwsem_set_reader_owned(sem);
		lockevent_inc(rwsem_opt_rlock);
		return true;
	}

	/* Back out the change */
	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
	return false;
}

/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long count = atomic_long_read(&sem->count);

	while (!(count & (RWSEM_LOCK_MASK|RWSEM_FLAG_HANDOFF))) {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &count,
					count | RWSEM_WRITER_LOCKED)) {
			rwsem_set_owner(sem);
			lockevent_inc(rwsem_opt_wlock);
			return true;
		}
	}
	return false;
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Due to the lock holder preemption issue, we skip spinning if the
	 * task is not on a CPU or its CPU is preempted.
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
					   unsigned long nonspinnable)
{
	struct task_struct *owner;
	unsigned long flags;
	bool ret = true;

	if (need_resched()) {
		lockevent_inc(rwsem_opt_fail);
		return false;
	}

	preempt_disable();
	rcu_read_lock();
	owner = rwsem_owner_flags(sem, &flags);
	/*
	 * Don't check the read-owner as the entry may be stale.
	 */
	if ((flags & nonspinnable) ||
	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
		ret = false;
	rcu_read_unlock();
	preempt_enable();

	lockevent_cond_inc(rwsem_opt_fail, !ret);
	return ret;
}

/*
 * The rwsem_spin_on_owner() function returns the following 4 values
 * depending on the lock owner state.
 *   OWNER_NULL  : owner is currently NULL
 *   OWNER_WRITER: when owner changes and is a writer
 *   OWNER_READER: when owner changes and the new owner may be a reader.
 *   OWNER_NONSPINNABLE:
 *		   when optimistic spinning has to stop because either the
 *		   owner stops running, is unknown, or its timeslice has
 *		   been used up.
 */
enum owner_state {
	OWNER_NULL		= 1 << 0,
	OWNER_WRITER		= 1 << 1,
	OWNER_READER		= 1 << 2,
	OWNER_NONSPINNABLE	= 1 << 3,
};
#define OWNER_SPINNABLE		(OWNER_NULL | OWNER_WRITER | OWNER_READER)

static inline enum owner_state
rwsem_owner_state(struct task_struct *owner, unsigned long flags, unsigned long nonspinnable)
{
	if (flags & nonspinnable)
		return OWNER_NONSPINNABLE;

	if (flags & RWSEM_READER_OWNED)
		return OWNER_READER;

	return owner ? OWNER_WRITER : OWNER_NULL;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) static noinline enum owner_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	struct task_struct *new, *owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	unsigned long flags, new_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	enum owner_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	owner = rwsem_owner_flags(sem, &flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	state = rwsem_owner_state(owner, flags, nonspinnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	if (state != OWNER_WRITER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		 * When a waiting writer sets the handoff flag, it may spin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 		 * on the owner as well. Once that writer acquires the lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		 * we can spin on it. So we don't need to quit even when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 		 * handoff bit is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		new = rwsem_owner_flags(sem, &new_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		if ((new != owner) || (new_flags != flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			state = rwsem_owner_state(new, new_flags, nonspinnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		 * Ensure we emit the owner->on_cpu dereference _after_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		 * checking that sem->owner still matches owner. If that fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		 * owner might point to free()d memory; if it still matches,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		 * the rcu_read_lock() ensures the memory stays valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		if (need_resched() || !owner_on_cpu(owner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 			state = OWNER_NONSPINNABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755)  * Calculate reader-owned rwsem spinning threshold for writer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * The more readers own the rwsem, the longer it will take for them to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  * wind down and free the rwsem. So the empirical formula used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  * determine the actual spinning time limit here is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761)  *   Spinning threshold = (10 + nr_readers/2)us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763)  * The limit is capped to a maximum of 25us (30 readers). This is just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764)  * a heuristic and is subject to change in the future.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	long count = atomic_long_read(&sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	int readers = count >> RWSEM_READER_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	u64 delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	if (readers > 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		readers = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	delta = (20 + readers) * NSEC_PER_USEC / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	return sched_clock() + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) }
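/*
 * Illustrative arithmetic for the threshold above (hypothetical reader
 * counts, following the formula in the code): with 16 readers,
 * delta = (20 + 16) * NSEC_PER_USEC / 2 = 18 us; with the 30-reader cap,
 * delta = (20 + 30) * NSEC_PER_USEC / 2 = 25 us, the documented maximum.
 */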
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	bool taken = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	int prev_owner_state = OWNER_NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	int loop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	u64 rspin_threshold = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 					   : RWSEM_RD_NONSPINNABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	preempt_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	/* sem->wait_lock should not be held when doing optimistic spinning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	if (!osq_lock(&sem->osq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	 * Optimistically spin on the owner field and attempt to acquire the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	 * lock whenever the owner changes. Spinning will be stopped when:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	 *  1) the owning writer isn't running; or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	 *  2) readers own the lock and the spinning time has exceeded the limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		enum owner_state owner_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		owner_state = rwsem_spin_on_owner(sem, nonspinnable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		if (!(owner_state & OWNER_SPINNABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		 * Try to acquire the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		taken = wlock ? rwsem_try_write_lock_unqueued(sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 			      : rwsem_try_read_lock_unqueued(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		if (taken)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		 * Time-based reader-owned rwsem optimistic spinning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if (wlock && (owner_state == OWNER_READER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			 * Re-initialize rspin_threshold every time the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			 * owner state changes from non-reader to reader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			 * This allows a writer to steal the lock in between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 			 * 2 reader phases and have the threshold reset at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			 * the beginning of the 2nd reader phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 			if (prev_owner_state != OWNER_READER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 				if (rwsem_test_oflags(sem, nonspinnable))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 				rspin_threshold = rwsem_rspin_threshold(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 				loop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			 * Check time threshold once every 16 iterations to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			 * avoid calling sched_clock() too frequently so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			 * as to reduce the average latency between the times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			 * when the lock becomes free and when the spinner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			 * is ready to do a trylock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			else if (!(++loop & 0xf) && (sched_clock() > rspin_threshold)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 				rwsem_set_nonspinnable(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				lockevent_inc(rwsem_opt_nospin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		 * An RT task cannot do optimistic spinning if it cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		 * be sure the lock holder is running or live-lock may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		 * happen if the current task and the lock holder happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		 * to run on the same CPU. However, aborting optimistic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		 * spinning while a NULL owner is detected may miss some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		 * opportunities where spinning can continue without causing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		 * a problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		 * There are 2 possible cases where an RT task may be able
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		 * to continue spinning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		 * 1) The lock owner is in the process of releasing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		 *    lock, sem->owner is cleared but the lock has not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		 *    been released yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		 * 2) The lock was free and owner cleared, but another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		 *    task just comes in and acquires the lock before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		 *    we try to get it. The new owner may be a spinnable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		 *    writer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		 * To take advantage of the two scenarios listed above, the RT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		 * task is made to retry one more time to see if it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		 * acquire the lock or continue spinning on the new owning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		 * writer. Of course, if the time lag is long enough or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		 * new owner is not a spinnable writer, the RT task will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		 * quit spinning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		 * If the owner is a writer, the need_resched() check is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		 * done inside rwsem_spin_on_owner(). If the owner is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		 * a writer, the need_resched() check needs to be done here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		if (owner_state != OWNER_WRITER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			if (need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			if (rt_task(current) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			   (prev_owner_state != OWNER_WRITER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		prev_owner_state = owner_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 * The cpu_relax() call is a compiler barrier which forces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 * everything in this loop to be re-loaded. We don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		 * memory barriers as we'll eventually observe the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		 * values at the cost of a few extra spins.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	osq_unlock(&sem->osq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	preempt_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	lockevent_cond_inc(rwsem_opt_fail, !taken);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	return taken;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * Clear the owner's RWSEM_WR_NONSPINNABLE bit if it is set. This should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * only be called when the reader count reaches 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * This gives writers a better chance to acquire the rwsem first before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * readers when the rwsem was being held by readers for a relatively long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * period of time. A race can happen in which an optimistic spinner may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * just stolen the rwsem and set the owner, but just clearing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * RWSEM_WR_NONSPINNABLE bit will do no harm anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) static inline void clear_wr_nonspinnable(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (rwsem_test_oflags(sem, RWSEM_WR_NONSPINNABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		atomic_long_andnot(RWSEM_WR_NONSPINNABLE, &sem->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  * This function is called when the reader fails to acquire the lock via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921)  * optimistic spinning. In this case we will still attempt a trylock if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * comparing the current rwsem state with the state at slowpath entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * indicates that the reader is still in a valid reader phase.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * This happens when the following conditions are true:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * 1) The lock is currently reader owned, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * 2) The lock was previously not reader-owned, or the last read owner has changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  * In the former case, we have transitioned from a writer phase to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * reader-phase while spinning. In the latter case, it means the reader
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * phase hasn't ended when we entered the optimistic spinning loop. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * both cases, the reader is eligible to acquire the lock. This is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * secondary path where a read lock is acquired optimistically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * The reader non-spinnable bit wasn't set at the time of entry, or we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * would not be here at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 					      unsigned long last_rowner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	unsigned long owner = atomic_long_read(&sem->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (!(owner & RWSEM_READER_OWNED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
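	/*
	 * (owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK compares only the
	 * task-pointer bits of the owner word, ignoring the low flag bits:
	 * a nonzero result means the read owner recorded at slowpath entry
	 * (if any) differs from the current one.
	 */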
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	if (((owner ^ last_rowner) & ~RWSEM_OWNER_FLAGS_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	    rwsem_try_read_lock_unqueued(sem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		lockevent_inc(rwsem_opt_rlock2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		lockevent_add(rwsem_opt_fail, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 					   unsigned long nonspinnable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static inline bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static inline void clear_wr_nonspinnable(struct rw_semaphore *sem) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 					      unsigned long last_rowner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) #define OWNER_NULL	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  * Wait for the read lock to be granted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static struct rw_semaphore __sched *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	long count, adjustment = -RWSEM_READER_BIAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	struct rwsem_waiter waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	bool wake = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	bool already_on_list = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * Save the current read-owner of rwsem, if available, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 * reader nonspinnable bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	waiter.last_rowner = atomic_long_read(&sem->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (!(waiter.last_rowner & RWSEM_READER_OWNED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (!rwsem_can_spin_on_owner(sem, RWSEM_RD_NONSPINNABLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		goto queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * Undo read bias from down_read() and do optimistic spinning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	adjustment = 0;
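	/*
	 * adjustment is zeroed because the reader bias has just been removed
	 * above; if we fall through to the queueing path below, only
	 * RWSEM_FLAG_WAITERS (when needed) will be added back to the count.
	 */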
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	if (rwsem_optimistic_spin(sem, false)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		/* rwsem_optimistic_spin() implies ACQUIRE on success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		 * Wake up other readers in the wait list if the front
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		 * waiter is a reader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			raw_spin_lock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			if (!list_empty(&sem->wait_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 						&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			wake_up_q(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) queue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	waiter.task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	waiter.type = RWSEM_WAITING_FOR_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	raw_spin_lock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	if (list_empty(&sem->wait_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		 * In case the wait queue is empty and the lock isn't owned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		 * by a writer and doesn't have the handoff bit set, this reader can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		 * exit the slowpath and return immediately as its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		 * RWSEM_READER_BIAS has already been set in the count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (adjustment && !(atomic_long_read(&sem->count) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			/* Provide lock ACQUIRE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			smp_acquire__after_ctrl_dep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			rwsem_set_reader_owned(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			lockevent_inc(rwsem_rlock_fast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		adjustment += RWSEM_FLAG_WAITERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	trace_android_vh_alter_rwsem_list_add(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 					&waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 					sem, &already_on_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	if (!already_on_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		list_add_tail(&waiter.list, &sem->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	/* we're now waiting on the lock, but no longer actively locking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (adjustment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		count = atomic_long_add_return(adjustment, &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		count = atomic_long_read(&sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	 * If there are no active locks, wake the front queued process(es).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	 * If there are no writers and we are first in the queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	 * wake our own waiter to join the existing active readers!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	if (!(count & RWSEM_LOCK_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		clear_wr_nonspinnable(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		wake = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (wake || (!(count & RWSEM_WRITER_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		    (adjustment & RWSEM_FLAG_WAITERS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	trace_android_vh_rwsem_wake(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	wake_up_q(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	/* wait to be given the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	trace_android_vh_rwsem_read_wait_start(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		set_current_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		if (!smp_load_acquire(&waiter.task)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			/* Matches rwsem_mark_wake()'s smp_store_release(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		if (signal_pending_state(state, current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			raw_spin_lock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			if (waiter.task)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 				goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		lockevent_inc(rwsem_sleep_reader);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	trace_android_vh_rwsem_read_wait_finish(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	lockevent_inc(rwsem_rlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) out_nolock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	list_del(&waiter.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	if (list_empty(&sem->wait_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		atomic_long_andnot(RWSEM_FLAG_WAITERS|RWSEM_FLAG_HANDOFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				   &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	trace_android_vh_rwsem_read_wait_finish(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	lockevent_inc(rwsem_rlock_fail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	return ERR_PTR(-EINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)  * This function is called by a write lock owner, so the owner value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)  * won't get changed by others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static inline void rwsem_disable_reader_optspin(struct rw_semaphore *sem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 						bool disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	if (unlikely(disable)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		atomic_long_or(RWSEM_RD_NONSPINNABLE, &sem->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		lockevent_inc(rwsem_opt_norspin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * Wait until we successfully acquire the write lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static struct rw_semaphore *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	long count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	bool disable_rspin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	enum writer_wait_state wstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	struct rwsem_waiter waiter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	struct rw_semaphore *ret = sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	bool already_on_list = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	/* do optimistic spinning and steal lock if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	    rwsem_optimistic_spin(sem, true)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		/* rwsem_optimistic_spin() implies ACQUIRE on success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	 * Disable reader optimistic spinning for this rwsem after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	 * acquiring the write lock when the nonspinnable bits are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	 * observed to be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	disable_rspin = atomic_long_read(&sem->owner) & RWSEM_NONSPINNABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	 * Optimistic spinning failed, proceed to the slowpath
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	 * and block until we can acquire the sem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	waiter.task = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	waiter.type = RWSEM_WAITING_FOR_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	raw_spin_lock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	/* account for this before adding a new element to the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	trace_android_vh_alter_rwsem_list_add(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 					&waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 					sem, &already_on_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (!already_on_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		list_add_tail(&waiter.list, &sem->wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	/* we're now waiting on the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	if (wstate == WRITER_NOT_FIRST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		count = atomic_long_read(&sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		 * If there were already threads queued before us and:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		 *  1) there are no active locks, wake the front
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		 *     queued process(es) as the handoff bit might be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		 *  2) there are no active writers and some readers, the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		 *     must be read owned; so we try to wake any read lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		 *     waiters that were queued ahead of us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		if (count & RWSEM_WRITER_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 			goto wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		rwsem_mark_wake(sem, (count & RWSEM_READER_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 					? RWSEM_WAKE_READERS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 					: RWSEM_WAKE_ANY, &wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		if (!wake_q_empty(&wake_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			 * We want to minimize wait_lock hold time especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			 * when a large number of readers are to be woken up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			wake_up_q(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			wake_q_init(&wake_q);	/* Used again, reinit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			raw_spin_lock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		atomic_long_or(RWSEM_FLAG_WAITERS, &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	trace_android_vh_rwsem_wake(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	/* wait until we successfully acquire the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	trace_android_vh_rwsem_write_wait_start(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	set_current_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		if (rwsem_try_write_lock(sem, wstate)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			/* rwsem_try_write_lock() implies ACQUIRE on success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		 * After setting the handoff bit and failing to acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		 * the lock, attempt to spin on owner to accelerate lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		 * transfer. If the previous owner is an on-cpu writer and it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		 * has just released the lock, OWNER_NULL will be returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		 * In this case, we attempt to acquire the lock again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		 * without sleeping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		if (wstate == WRITER_HANDOFF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		    rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 			goto trylock_again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		/* Block until there are no active lockers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 			if (signal_pending_state(state, current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 				goto out_nolock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			lockevent_inc(rwsem_sleep_writer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			set_current_state(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			 * If HANDOFF bit is set, unconditionally do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			 * a trylock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			if (wstate == WRITER_HANDOFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			if ((wstate == WRITER_NOT_FIRST) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			    (rwsem_first_waiter(sem) == &waiter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				wstate = WRITER_FIRST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			count = atomic_long_read(&sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			if (!(count & RWSEM_LOCK_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 			 * The setting of the handoff bit is deferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			 * until rwsem_try_write_lock() is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			if ((wstate == WRITER_FIRST) && (rt_task(current) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 			    time_after(jiffies, waiter.timeout))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 				wstate = WRITER_HANDOFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 				lockevent_inc(rwsem_wlock_handoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) trylock_again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		raw_spin_lock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	trace_android_vh_rwsem_write_wait_finish(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	list_del(&waiter.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	rwsem_disable_reader_optspin(sem, disable_rspin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	lockevent_inc(rwsem_wlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) out_nolock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	__set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	trace_android_vh_rwsem_write_wait_finish(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	raw_spin_lock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	list_del(&waiter.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if (unlikely(wstate == WRITER_HANDOFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		atomic_long_andnot(RWSEM_FLAG_HANDOFF,  &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	if (list_empty(&sem->wait_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		atomic_long_andnot(RWSEM_FLAG_WAITERS, &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	raw_spin_unlock_irq(&sem->wait_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	wake_up_q(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	lockevent_inc(rwsem_wlock_fail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	return ERR_PTR(-EINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  * handle waking up a waiter on the semaphore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)  * - up_read/up_write has decremented the active part of count if we come here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem, long count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (!list_empty(&sem->wait_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	trace_android_vh_rwsem_wake_finish(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	wake_up_q(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * downgrade a write lock into a read lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  * - caller has atomically swapped its write lock for a read lock and seen waiters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  * - just wake up any readers at the front of the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	DEFINE_WAKE_Q(wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	raw_spin_lock_irqsave(&sem->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	if (!list_empty(&sem->wait_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	wake_up_q(&wake_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)  * lock for reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) static inline void __down_read(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	if (!rwsem_read_trylock(sem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		rwsem_down_read_slowpath(sem, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		rwsem_set_reader_owned(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static inline int __down_read_interruptible(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (!rwsem_read_trylock(sem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_INTERRUPTIBLE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		rwsem_set_reader_owned(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static inline int __down_read_killable(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	if (!rwsem_read_trylock(sem)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		if (IS_ERR(rwsem_down_read_slowpath(sem, TASK_KILLABLE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 			return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		rwsem_set_reader_owned(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static inline int __down_read_trylock(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	 * Optimize for the case when the rwsem is not locked at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	tmp = RWSEM_UNLOCKED_VALUE;
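	/*
	 * On a failed cmpxchg, atomic_long_try_cmpxchg_acquire() updates tmp
	 * with the count value it observed, so the loop keeps retrying until
	 * it either succeeds or sees a bit from RWSEM_READ_FAILED_MASK set.
	 */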
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 					tmp + RWSEM_READER_BIAS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			rwsem_set_reader_owned(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	} while (!(tmp & RWSEM_READ_FAILED_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  * lock for writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static inline void __down_write(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	long tmp = RWSEM_UNLOCKED_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 						      RWSEM_WRITER_LOCKED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		rwsem_down_write_slowpath(sem, TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		rwsem_set_owner(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static inline int __down_write_killable(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	long tmp = RWSEM_UNLOCKED_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	if (unlikely(!atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 						      RWSEM_WRITER_LOCKED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		if (IS_ERR(rwsem_down_write_slowpath(sem, TASK_KILLABLE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		rwsem_set_owner(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static inline int __down_write_trylock(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	tmp  = RWSEM_UNLOCKED_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 					    RWSEM_WRITER_LOCKED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		rwsem_set_owner(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  * unlock after reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static inline void __up_read(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	rwsem_clear_reader_owned(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		      RWSEM_FLAG_WAITERS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		clear_wr_nonspinnable(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		rwsem_wake(sem, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	trace_android_vh_rwsem_up_read_end(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * unlock after writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) static inline void __up_write(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	 * sem->owner may differ from current if the ownership is transferred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	 * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 			    !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	rwsem_clear_owner(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 		rwsem_wake(sem, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	trace_android_vh_rwsem_up_write_end(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)  * downgrade write lock to read lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static inline void __downgrade_write(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	 * When downgrading from exclusive to shared ownership,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	 * anything inside the write-locked region cannot leak
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	 * into the read side. In contrast, anything in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	 * read-locked region is ok to be re-ordered into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	 * write side. As such, rely on RELEASE semantics.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	tmp = atomic_long_fetch_add_release(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	rwsem_set_reader_owned(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	if (tmp & RWSEM_FLAG_WAITERS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		rwsem_downgrade_wake(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)  * lock for reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) void __sched down_read(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) EXPORT_SYMBOL(down_read);
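
A caller-side sketch (not part of this file): a reader brackets its critical section with down_read()/up_read(), so other readers may run concurrently. All frob_* names below are hypothetical.

#include <linux/rwsem.h>
#include <linux/list.h>

static DECLARE_RWSEM(frob_rwsem);	/* hypothetical lock */
static LIST_HEAD(frob_list);		/* hypothetical list it protects */

struct frob {
	struct list_head node;
	int id;
};

/* Count entries while holding the rwsem shared. */
static int frob_count(void)
{
	struct frob *f;
	int n = 0;

	down_read(&frob_rwsem);
	list_for_each_entry(f, &frob_list, node)
		n++;
	up_read(&frob_rwsem);

	return n;
}
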
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) int __sched down_read_interruptible(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		rwsem_release(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 		return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) EXPORT_SYMBOL(down_read_interruptible);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) int __sched down_read_killable(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		rwsem_release(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) EXPORT_SYMBOL(down_read_killable);
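
A minimal sketch of the killable variant, assuming a hypothetical state_rwsem/state_value pair: the caller must check the return value, since a fatal signal makes the acquisition fail with -EINTR instead of sleeping on. down_read_interruptible() is used the same way, but any signal aborts the wait.

#include <linux/rwsem.h>
#include <linux/errno.h>

static DECLARE_RWSEM(state_rwsem);	/* hypothetical */
static int state_value;			/* hypothetical data it protects */

static int state_read(int *out)
{
	int ret = down_read_killable(&state_rwsem);

	if (ret)			/* -EINTR: back out, lock not held */
		return ret;

	*out = state_value;
	up_read(&state_rwsem);
	return 0;
}
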
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)  * trylock for reading -- returns 1 if successful, 0 if contention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) int down_read_trylock(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	int ret = __down_read_trylock(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) EXPORT_SYMBOL(down_read_trylock);
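
The trylock form never sleeps, which suits best-effort paths. A sketch with a hypothetical stats_rwsem/stats_hits pair, keyed to the 1/0 return convention noted above:

#include <linux/rwsem.h>
#include <linux/types.h>

static DECLARE_RWSEM(stats_rwsem);	/* hypothetical */
static unsigned long stats_hits;	/* hypothetical counter it protects */

/* Best-effort sample: skip rather than sleep when the lock is contended. */
static bool stats_sample(unsigned long *out)
{
	if (!down_read_trylock(&stats_rwsem))
		return false;		/* 0: contention, lock not taken */

	*out = stats_hits;
	up_read(&stats_rwsem);
	return true;			/* 1 was returned, lock was taken */
}
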
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  * lock for writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) void __sched down_write(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) EXPORT_SYMBOL(down_write);
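
The matching writer side of the earlier hypothetical frob_list sketch: modifications take the rwsem exclusively, excluding both readers and other writers.

/* frob_rwsem, frob_list and struct frob as in the read-side sketch above. */
static void frob_add(struct frob *f)
{
	down_write(&frob_rwsem);
	list_add_tail(&f->node, &frob_list);
	up_write(&frob_rwsem);
}
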
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  * lock for writing -- killable; returns -EINTR if interrupted by a fatal signal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) int __sched down_write_killable(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 				  __down_write_killable)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		rwsem_release(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) EXPORT_SYMBOL(down_write_killable);
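
A sketch of the killable writer, assuming a hypothetical cfg_rwsem/cfg_value pair: the error code propagates to the caller, so for example a syscall path can return -EINTR when the task is being killed.

#include <linux/rwsem.h>
#include <linux/errno.h>

static DECLARE_RWSEM(cfg_rwsem);	/* hypothetical */
static int cfg_value;			/* hypothetical setting it protects */

static int cfg_set(int new_value)
{
	if (down_write_killable(&cfg_rwsem))
		return -EINTR;		/* fatal signal while waiting */

	cfg_value = new_value;
	up_write(&cfg_rwsem);
	return 0;
}
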
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)  * trylock for writing -- returns 1 if successful, 0 if contention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int down_write_trylock(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	int ret = __down_write_trylock(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) EXPORT_SYMBOL(down_write_trylock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  * release a read lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) void up_read(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	rwsem_release(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	__up_read(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) EXPORT_SYMBOL(up_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)  * release a write lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) void up_write(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	rwsem_release(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	trace_android_vh_rwsem_write_finished(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	__up_write(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) EXPORT_SYMBOL(up_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  * downgrade write lock to read lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) void downgrade_write(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	lock_downgrade(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	trace_android_vh_rwsem_write_finished(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	__downgrade_write(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) EXPORT_SYMBOL(downgrade_write);
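
A sketch of the downgrade pattern with a hypothetical cache_rwsem: expensive setup runs under the exclusive lock, then the holder keeps only shared ownership so waiting readers can proceed. The pairing is down_write() ... downgrade_write() ... up_read().

#include <linux/rwsem.h>

static DECLARE_RWSEM(cache_rwsem);	/* hypothetical */
static int cache_ready;			/* hypothetical state it protects */

static void cache_fill_then_use(void)
{
	down_write(&cache_rwsem);
	cache_ready = 1;		/* exclusive setup */
	downgrade_write(&cache_rwsem);	/* now held for reading */

	/* ... read-side work, concurrently with other readers ... */

	up_read(&cache_rwsem);
}
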
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) #ifdef CONFIG_DEBUG_LOCK_ALLOC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) void down_read_nested(struct rw_semaphore *sem, int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) EXPORT_SYMBOL(down_read_nested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) int down_read_killable_nested(struct rw_semaphore *sem, int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		rwsem_release(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) EXPORT_SYMBOL(down_read_killable_nested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) EXPORT_SYMBOL(_down_write_nest_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) void down_read_non_owner(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	__down_read(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	__rwsem_set_reader_owned(sem, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) EXPORT_SYMBOL(down_read_non_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) void down_write_nested(struct rw_semaphore *sem, int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) EXPORT_SYMBOL(down_write_nested);
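
A sketch of subclass annotation, assuming a hypothetical struct node whose instances share one lock class: when two such locks are legitimately nested (parent before child), the _nested variant with SINGLE_DEPTH_NESTING tells lockdep the second acquisition is intentional rather than a self-deadlock.

#include <linux/rwsem.h>
#include <linux/lockdep.h>

struct node {				/* hypothetical */
	struct rw_semaphore lock;
};

static void node_lock_pair(struct node *parent, struct node *child)
{
	down_write(&parent->lock);
	down_write_nested(&child->lock, SINGLE_DEPTH_NESTING);
}

static void node_unlock_pair(struct node *parent, struct node *child)
{
	up_write(&child->lock);
	up_write(&parent->lock);
}
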
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	might_sleep();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 				  __down_write_killable)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		rwsem_release(&sem->dep_map, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) EXPORT_SYMBOL(down_write_killable_nested);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) void up_read_non_owner(struct rw_semaphore *sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	__up_read(sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) EXPORT_SYMBOL(up_read_non_owner);
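
A sketch of the non-owner pairing, with a hypothetical buf_rwsem handed from the submitting task to a workqueue: the task that releases the lock is not the one that acquired it, so both ends must use the _non_owner variants to keep the owner tracking consistent.

#include <linux/rwsem.h>
#include <linux/workqueue.h>

static DECLARE_RWSEM(buf_rwsem);		/* hypothetical */

static void buf_work_fn(struct work_struct *work);
static DECLARE_WORK(buf_work, buf_work_fn);	/* hypothetical deferred consumer */

static void buf_submit(void)
{
	down_read_non_owner(&buf_rwsem);	/* taken here ... */
	schedule_work(&buf_work);
}

static void buf_work_fn(struct work_struct *work)
{
	/* ... consume the shared buffer ... */
	up_read_non_owner(&buf_rwsem);		/* ... released by the worker */
}
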
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) #endif