Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards.

Source listing: drivers/gpu/drm/drm_lock.c (legacy DRM lock IOCTLs)

/*
 * \file drm_lock.c
 * IOCTLs for locking
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/export.h>
#include <linux/sched/signal.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);

/*
 * Take the heavyweight lock.
 *
 * \param lock lock pointer.
 * \param context locking context.
 * \return one if the lock is held, or zero otherwise.
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 */
static
int drm_lock_take(struct drm_lock_data *lock_data,
		  unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	do {
		old = *lock;
		if (old & _DRM_LOCK_HELD)
			new = old | _DRM_LOCK_CONT;
		else {
			new = context | _DRM_LOCK_HELD |
				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
				 _DRM_LOCK_CONT : 0);
		}
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	spin_unlock_bh(&lock_data->spinlock);

	if (_DRM_LOCKING_CONTEXT(old) == context) {
		if (old & _DRM_LOCK_HELD) {
			if (context != DRM_KERNEL_CONTEXT) {
				DRM_ERROR("%d holds heavyweight lock\n",
					  context);
			}
			return 0;
		}
	}

	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
		/* Have lock */
		return 1;
	}
	return 0;
}

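/*
 * Illustrative note (added for this listing, not part of the original
 * source): the hardware lock is a single 32-bit word, with the holder's
 * context in the low bits and two flag bits on top, as defined in
 * include/uapi/drm/drm.h:
 *
 *	bit 31	_DRM_LOCK_HELD	(0x80000000)  lock is currently owned
 *	bit 30	_DRM_LOCK_CONT	(0x40000000)  someone else is waiting
 *	bits 29..0			holder's context id
 *
 * For example, a lock word of 0x80000007 decodes as "held by context 7,
 * uncontended": _DRM_LOCK_IS_HELD(0x80000007) is non-zero and
 * _DRM_LOCKING_CONTEXT(0x80000007) == 7.  This is why drm_lock_take()
 * above can claim the lock or flag contention with a single cmpxchg on
 * that word.
 */
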
/*
 * This takes the lock forcibly and hands it to the given context.  Should
 * ONLY be used inside *_unlock to hand the lock to the kernel before
 * calling *_dma_schedule.
 *
 * \param dev DRM device.
 * \param lock lock pointer.
 * \param context locking context.
 * \return always one.
 *
 * Resets the lock file pointer.
 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
 */
static int drm_lock_transfer(struct drm_lock_data *lock_data,
			     unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	lock_data->file_priv = NULL;
	do {
		old = *lock;
		new = context | _DRM_LOCK_HELD;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	return 1;
}

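/*
 * Free the heavyweight lock, or hand it over to a waiting kernel context.
 *
 * If kernel waiters are queued, ownership is transferred to the kernel
 * (the idlelock path below) instead of being dropped.  Otherwise the lock
 * word is cleared and sleepers on the lock queue are woken.  Returns 1 if
 * the lock was transferred or was found held by another context, 0 on a
 * normal release.
 */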
static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
				unsigned int context)
{
	unsigned int old, new, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	if (lock_data->kernel_waiters != 0) {
		drm_lock_transfer(lock_data, 0);
		lock_data->idle_has_lock = 1;
		spin_unlock_bh(&lock_data->spinlock);
		return 1;
	}
	spin_unlock_bh(&lock_data->spinlock);

	do {
		old = *lock;
		new = _DRM_LOCKING_CONTEXT(old);
		prev = cmpxchg(lock, old, new);
	} while (prev != old);

	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		DRM_ERROR("%d freed heavyweight lock held by %d\n",
			  context, _DRM_LOCKING_CONTEXT(old));
		return 1;
	}
	wake_up_interruptible(&lock_data->lock_queue);
	return 0;
}

/*
 * Lock ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Add the current task to the lock wait queue, and attempt to take the lock.
 */
int drm_legacy_lock(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	DECLARE_WAITQUEUE(entry, current);
	struct drm_lock *lock = data;
	struct drm_master *master = file_priv->master;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	++file_priv->lock_count;

	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock->context, task_pid_nr(current),
		  master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
		  lock->flags);

	add_wait_queue(&master->lock.lock_queue, &entry);
	spin_lock_bh(&master->lock.spinlock);
	master->lock.user_waiters++;
	spin_unlock_bh(&master->lock.spinlock);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!master->lock.hw_lock) {
			/* Device has been unregistered */
			send_sig(SIGTERM, current, 0);
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&master->lock, lock->context)) {
			master->lock.file_priv = file_priv;
			master->lock.lock_time = jiffies;
			break;	/* Got lock */
		}

		/* Contention */
		mutex_unlock(&drm_global_mutex);
		schedule();
		mutex_lock(&drm_global_mutex);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	spin_lock_bh(&master->lock.spinlock);
	master->lock.user_waiters--;
	spin_unlock_bh(&master->lock.spinlock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&master->lock.lock_queue, &entry);

	DRM_DEBUG("%d %s\n", lock->context,
		  ret ? "interrupted" : "has lock");
	if (ret)
		return ret;

	/* Don't block all signals on the master process for now;
	 * really probably not the correct answer, but it lets us debug
	 * xkb/xserver for now. */
	if (!drm_is_current_master(file_priv)) {
		dev->sigdata.context = lock->context;
		dev->sigdata.lock = master->lock.hw_lock;
	}

	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) {
		if (dev->driver->dma_quiescent(dev)) {
			DRM_DEBUG("%d waiting for DMA quiescent\n",
				  lock->context);
			return -EBUSY;
		}
	}

	return 0;
}

/*
 * Unlock ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_lock *lock = data;
	struct drm_master *master = file_priv->master;

	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return -EOPNOTSUPP;

	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	if (drm_legacy_lock_free(&master->lock, lock->context)) {
		/* FIXME: Should really bail out here. */
	}

	return 0;
}

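/*
 * Userspace view (illustrative sketch added for this listing, not part of
 * the original source): a legacy DRI client reaches drm_legacy_lock() and
 * drm_legacy_unlock() through the DRM_IOCTL_LOCK and DRM_IOCTL_UNLOCK
 * ioctls on the device node, passing the context handle it allocated
 * earlier; libdrm wraps the same calls as drmGetLock()/drmUnlock().
 * Roughly:
 *
 *	struct drm_lock req = {
 *		.context = ctx,			// context handle, never DRM_KERNEL_CONTEXT
 *		.flags	 = _DRM_LOCK_QUIESCENT,	// optionally also wait for DMA idle
 *	};
 *	if (ioctl(fd, DRM_IOCTL_LOCK, &req) == 0) {
 *		// ... touch the hardware ...
 *		req.flags = 0;
 *		ioctl(fd, DRM_IOCTL_UNLOCK, &req);
 *	}
 */
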
/*
 * This function returns immediately and takes the hw lock with the kernel
 * context if it is free, otherwise it gets the highest priority when and
 * if it is eventually released.
 *
 * This guarantees that the kernel will _eventually_ have the lock _unless_
 * it is held by a blocked process. (In the latter case an explicit wait
 * for the hardware lock would cause a deadlock, which is why the
 * "idlelock" was invented).
 *
 * This should be sufficient to wait for GPU idle without having to worry
 * about starvation.
 */
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
	int ret;

	spin_lock_bh(&lock_data->spinlock);
	lock_data->kernel_waiters++;
	if (!lock_data->idle_has_lock) {

		spin_unlock_bh(&lock_data->spinlock);
		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
		spin_lock_bh(&lock_data->spinlock);

		if (ret == 1)
			lock_data->idle_has_lock = 1;
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_take);

void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
{
	unsigned int old, prev;
	volatile unsigned int *lock = &lock_data->hw_lock->lock;

	spin_lock_bh(&lock_data->spinlock);
	if (--lock_data->kernel_waiters == 0) {
		if (lock_data->idle_has_lock) {
			do {
				old = *lock;
				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
			} while (prev != old);
			wake_up_interruptible(&lock_data->lock_queue);
			lock_data->idle_has_lock = 0;
		}
	}
	spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_legacy_idlelock_release);
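
/*
 * In-kernel usage sketch (illustrative, not taken from a real driver): a
 * legacy driver that must wait for the GPU to go idle, e.g. before tearing
 * down DMA, brackets the wait with the idlelock so it cannot deadlock
 * against a client that sleeps while holding the heavyweight lock:
 *
 *	drm_legacy_idlelock_take(&master->lock);
 *	// ... wait for the engine to drain ...
 *	drm_legacy_idlelock_release(&master->lock);
 */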

static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
				     struct drm_file *file_priv)
{
	struct drm_master *master = file_priv->master;

	return (file_priv->lock_count && master->lock.hw_lock &&
		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
		master->lock.file_priv == file_priv);
}

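/*
 * Drop the heavyweight lock on file release if this client still holds it,
 * so that an exiting or crashed client cannot leave the lock wedged.
 */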
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;

	/* if the master has gone away we can't do anything with the lock */
	if (!dev->master)
		return;

	if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
		DRM_DEBUG("File %p released, freeing lock for context %d\n",
			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
		drm_legacy_lock_free(&file_priv->master->lock,
				     _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
	}
}

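/*
 * Tear down the lock state when the master goes away: detach the hardware
 * lock and wake every sleeper on the lock queue so they can observe that
 * hw_lock is now NULL.
 */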
void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
{
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return;

	/*
	 * Since the master is disappearing, so is the
	 * possibility to lock.
	 */
	mutex_lock(&dev->struct_mutex);
	if (master->lock.hw_lock) {
		if (dev->sigdata.lock == master->lock.hw_lock)
			dev->sigdata.lock = NULL;
		master->lock.hw_lock = NULL;
		master->lock.file_priv = NULL;
		wake_up_interruptible_all(&master->lock.lock_queue);
	}
	mutex_unlock(&dev->struct_mutex);
}