// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock. It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore. If it's zero, there may be tasks waiting on the wait_list.
 */
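
/*
 * Illustrative sketch (not part of this file): how ->count behaves from a
 * caller's point of view. The semaphore name and the initial count below
 * are made up for the example.
 *
 *	struct semaphore my_sem;
 *
 *	sema_init(&my_sem, 2);		// up to two tasks may proceed at once
 *	down(&my_sem);			// count 2 -> 1, returns immediately
 *	down(&my_sem);			// count 1 -> 0, returns immediately
 *	down(&my_sem);			// count is 0, sleeps until someone calls up()
 */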

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>

static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore. If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated; please use down_interruptible() or
 * down_killable() instead.
 */
void down(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		__down(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
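
/*
 * Usage sketch (illustrative only, not taken from callers of this file):
 * my_sem is a hypothetical semaphore initialized elsewhere with sema_init().
 *
 *	down(&my_sem);			// may sleep uninterruptibly when contended
 *	// ... touch the resource guarded by my_sem ...
 *	up(&my_sem);
 *
 * Since the sleep cannot be interrupted, new code should normally prefer
 * down_interruptible() or down_killable(), as noted in the kernel-doc above.
 */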

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore. If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
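
/*
 * Usage sketch (illustrative only; my_sem is a hypothetical semaphore):
 *
 *	if (down_interruptible(&my_sem))
 *		return -EINTR;		// woken by a signal, nothing acquired
 *	// ... touch the resource guarded by my_sem ...
 *	up(&my_sem);
 */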

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore. If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR. If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);
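
/*
 * Usage sketch (illustrative only; my_sem is a hypothetical semaphore).
 * Only a fatal signal ends the wait, so ordinary signals cannot abort it:
 *
 *	if (down_killable(&my_sem))
 *		return -EINTR;		// the task is being killed
 *	// ... touch the resource guarded by my_sem ...
 *	up(&my_sem);
 */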

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock! Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
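
/*
 * Usage sketch (illustrative only; my_sem is a hypothetical semaphore).
 * Note the inverted sense compared with spin_trylock()/mutex_trylock():
 * a non-zero return means the semaphore was NOT acquired:
 *
 *	if (down_trylock(&my_sem))
 *		return -EBUSY;		// contended; nothing was acquired
 *	// ... touch the resource guarded by my_sem ...
 *	up(&my_sem);
 */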

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore. If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
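
/*
 * Usage sketch (illustrative only; my_sem is a hypothetical semaphore).
 * The timeout is in jiffies, so callers typically convert from a human
 * unit with msecs_to_jiffies():
 *
 *	if (down_timeout(&my_sem, msecs_to_jiffies(100)))
 *		return -ETIME;		// not acquired within roughly 100ms
 *	// ... touch the resource guarded by my_sem ...
 *	up(&my_sem);
 */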

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore. Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
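
/*
 * Usage sketch (illustrative only; my_sem and my_irq_handler are
 * hypothetical). Because up() never sleeps and uses the irqsave lock
 * variant, it can signal a waiter from interrupt context:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		up(&my_sem);		// wake a task blocked in down()
 *		return IRQ_HANDLED;
 *	}
 */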

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;		/* entry on sem->wait_list, under sem->lock */
	struct task_struct *task;	/* sleeping task, woken by __up() */
	bool up;			/* set by __up() when the semaphore is handed over */
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler. Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
								long timeout)
{
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = current;
	waiter.up = false;

	for (;;) {
		if (signal_pending_state(state, current))
			goto interrupted;
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_current_state(state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -ETIME;

 interrupted:
	list_del(&waiter.list);
	return -EINTR;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

/*
 * Called with sem->lock held and sem->wait_list known to be non-empty:
 * hand the semaphore directly to the longest-waiting task and wake it.
 */
static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_up_process(waiter->task);
}