/*
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
#include <uapi/asm/mman.h>
#include <drm/drm_vma_manager.h>
#endif

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

bool drm_dev_needs_global_mutex(struct drm_device *dev)
{
	/*
	 * Legacy drivers rely on all kinds of BKL locking semantics, don't
	 * bother. They also still need BKL locking for their ioctls, so better
	 * safe than sorry.
	 */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		return true;

	/*
	 * The deprecated ->load callback must be called after the driver is
	 * already registered. This means such drivers rely on the BKL to make
	 * sure an open can't proceed until the driver is actually fully set up.
	 * Similar hilarity holds for the unload callback.
	 */
	if (dev->driver->load || dev->driver->unload)
		return true;

	/*
	 * Drivers with the lastclose callback assume that it's synchronized
	 * against concurrent opens, which again needs the BKL. The proper fix
	 * is to use the drm_client infrastructure with proper locking for each
	 * client.
	 */
	if (dev->driver->lastclose)
		return true;

	return false;
}

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Note that drm_compat_ioctl() will be NULL if CONFIG_COMPAT=n, so there's no
 * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
 * that require 32/64 bit compatibility support must provide their own
 * &file_operations.compat_ioctl handler that processes private ioctls and calls
 * drm_compat_ioctl() for core ioctls.
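 *
 * Such a compat handler could look like this (a minimal sketch; the example_
 * names are hypothetical, only DRM_IOCTL_NR() and DRM_COMMAND_BASE are real)::
 *
 *     static long example_compat_ioctl(struct file *filp, unsigned int cmd,
 *                                      unsigned long arg)
 *     {
 *             if (DRM_IOCTL_NR(cmd) < DRM_COMMAND_BASE)
 *                     return drm_compat_ioctl(filp, cmd, arg);
 *
 *             return example_handle_compat_ioctl(filp, cmd, arg);
 *     }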
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used by the KMS API to send
 * vblank and page flip completion events. But drivers can also use them for
 * their own needs, e.g. to signal completion of rendering.
 *
 * For the driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the main starting points.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
 * function, modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
 * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
 * following is an example &file_operations structure::
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 *
 * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
 * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
 * simpler.
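 *
 * For instance, a plain GEM driver can replace the hand-rolled structure
 * above with a single line (reusing the example name)::
 *
 *     DEFINE_DRM_GEM_FOPS(example_drm_fops);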
 *
 * The driver's &file_operations must be stored in &drm_driver.fops.
 *
 * For driver-private IOCTL handling see the more detailed discussion in
 * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
 */

/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context and
 * can be used by the caller freely. Note that the context keeps a pointer to
 * @minor, so it must be freed before @minor is.
 *
 * RETURNS:
 * Pointer to newly allocated context, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}

static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This destroys and deallocates a DRM file context previously allocated via
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
		  current->comm, task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  atomic_read(&dev->open_count));

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	drm_prime_destroy_file_private(&file->prime);

	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}

static void drm_close_helper(struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(file_priv);
}

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/*
 * Called whenever a process opens a drm node
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it to the doubly-linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
		  task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
						       struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	if (!atomic_fetch_inc(&dev->open_count))
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_legacy_setup(dev);
		if (retcode) {
			drm_close_helper(filp);
			goto err_undo;
		}
	}

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	return 0;

err_undo:
	atomic_dec(&dev->open_count);
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);

void drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls the
 * &drm_driver.postclose driver callback. If this is the last open file for the
 * DRM device also proceeds to call the &drm_driver.lastclose driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));

	drm_close_helper(filp);

	if (atomic_dec_and_test(&dev->open_count))
		drm_lastclose(dev);

	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);

/**
 * drm_release_noglobal - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function may be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, including the
 * &drm_driver.postclose driver callback, prior to taking the drm_global_mutex.
 * If this is the last open file for the DRM device it then proceeds to call
 * the &drm_driver.lastclose driver callback while holding the mutex.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release_noglobal(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	drm_close_helper(filp);

	if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
		drm_lastclose(dev);
		mutex_unlock(&drm_global_mutex);
	}

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release_noglobal);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
 * must set the &file_operations.llseek to no_llseek(). Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
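 *
 * A minimal userspace consumer might look like this (a sketch only; drm_fd
 * and handle_event() are hypothetical)::
 *
 *     char buf[4096];
 *     ssize_t len = read(drm_fd, buf, sizeof(buf));
 *     ssize_t i = 0;
 *
 *     while (i < len) {
 *             struct drm_event *ev = (struct drm_event *)&buf[i];
 *
 *             handle_event(ev);
 *             i += ev->length;
 *     }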
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible_poll(&file_priv->event_wait,
							   EPOLLIN | EPOLLRDNORM);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);

/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their &file_operations.poll method
 * iff they use DRM events for asynchronous signalling to userspace. Since
 * events are used by the KMS API for vblank and page flip completion this means
 * all modern display drivers must use it.
 *
 * See also drm_read().
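 *
 * Userspace typically polls the DRM fd alongside its other fds (a sketch;
 * drm_fd and drain_events() are hypothetical)::
 *
 *     struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *             drain_events(drm_fd);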
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc() and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
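 *
 * A typical nonblocking-ioctl flow looks roughly like this (a sketch; struct
 * example_event is hypothetical)::
 *
 *     struct example_event {
 *             struct drm_pending_event base;
 *             struct drm_event_vblank event;
 *     };
 *
 *     struct example_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
 *
 *     e->event.base.type = DRM_EVENT_VBLANK;
 *     e->event.base.length = sizeof(e->event);
 *     ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
 *     if (ret)
 *             kfree(e);
 *
 * On success the event is later delivered with drm_send_event(dev, &e->base),
 * or aborted with drm_event_cancel_free(dev, &e->base) if the ioctl fails
 * before queuing anything.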
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc() and @p must be the first member element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * Callers which already hold &drm_device.event_lock should use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * drm_event_reserve_init_locked() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * 0 on success or a negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) int drm_event_reserve_init(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) struct drm_file *file_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) struct drm_pending_event *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct drm_event *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) spin_lock_irqsave(&dev->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) spin_unlock_irqrestore(&dev->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) EXPORT_SYMBOL(drm_event_reserve_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * drm_event_cancel_free - free a DRM event and release its space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * @dev: DRM device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * @p: tracking structure for the pending event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * This function frees the event @p initialized with drm_event_reserve_init()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) * and releases any allocated space. It is used to cancel an event when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * nonblocking operation could not be submitted and needed to be aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) void drm_event_cancel_free(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct drm_pending_event *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) spin_lock_irqsave(&dev->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) if (p->file_priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) p->file_priv->event_space += p->event->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) list_del(&p->pending_link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) spin_unlock_irqrestore(&dev->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (p->fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) dma_fence_put(p->fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) EXPORT_SYMBOL(drm_event_cancel_free);

/**
 * drm_send_event_helper - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
 * time domain
 *
 * This helper function sends the event @e, initialized with
 * drm_event_reserve_init(), to its associated userspace DRM file.
 * When the caller passes a valid (nonzero) timestamp, the fence is signalled
 * with dma_fence_signal_timestamp() instead of dma_fence_signal().
 */
void drm_send_event_helper(struct drm_device *dev,
			   struct drm_pending_event *e, ktime_t timestamp)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		if (timestamp)
			dma_fence_signal_timestamp(e->fence, timestamp);
		else
			dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible_poll(&e->file_priv->event_wait,
				   EPOLLIN | EPOLLRDNORM);
}

/**
 * drm_send_event_timestamp_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
 * time domain
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_timestamp_locked(struct drm_device *dev,
				     struct drm_pending_event *e, ktime_t timestamp)
{
	drm_send_event_helper(dev, e, timestamp);
}
EXPORT_SYMBOL(drm_send_event_timestamp_locked);
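
/*
 * Usage sketch (illustrative only): delivering a flip-completion event with
 * the hardware vblank timestamp while already under the event lock, roughly
 * as the vblank code does. Here "now" is assumed to be the CLOCK_MONOTONIC
 * time of the vblank and e a struct drm_pending_vblank_event.
 *
 *	assert_spin_locked(&dev->event_lock);
 *	drm_send_event_timestamp_locked(dev, &e->base, now);
 */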

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	drm_send_event_helper(dev, e, 0);
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock, see drm_send_event_locked() for callers which
 * already hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_helper(dev, e, 0);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
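
/*
 * Usage sketch (illustrative only): completing the nonblocking operation from
 * a workqueue handler, where the event lock is not yet held. The drm_foo_*
 * names and the container layout are made-up for illustration.
 *
 *	static void drm_foo_flip_work(struct work_struct *work)
 *	{
 *		struct drm_foo_flip *flip = container_of(work, typeof(*flip), work);
 *
 *		drm_foo_program_hw(flip);
 *		drm_send_event(flip->dev, &flip->event->base);
 *	}
 */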

/**
 * mock_drm_getfile - Create a new struct file for the drm device
 * @minor: drm minor to wrap (e.g. &drm_device.primary)
 * @flags: file creation mode (O_RDWR etc)
 *
 * This creates a new struct file that wraps a DRM file context around a
 * DRM minor. This mimics userspace opening e.g. /dev/dri/card0, but without
 * invoking userspace. The struct file may be operated on using its f_op
 * (the drm_device.driver.fops) to mimic userspace operations, or be supplied
 * to userspace facing functions as an internal/anonymous client.
 *
 * RETURNS:
 * Pointer to newly created struct file, ERR_PTR on failure.
 */
struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	struct file *file;

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	file = anon_inode_getfile("drm", dev->driver->fops, priv, flags);
	if (IS_ERR(file)) {
		drm_file_free(priv);
		return file;
	}

	/* Everyone shares a single global address space */
	file->f_mapping = dev->anon_inode->i_mapping;

	drm_dev_get(dev);
	priv->filp = file;

	return file;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile);
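
/*
 * Usage sketch (illustrative only): an in-kernel selftest opening an
 * anonymous client on the primary node and releasing it again. fput() tears
 * down the drm_file and drops the device reference taken above.
 *
 *	struct file *file;
 *
 *	file = mock_drm_getfile(dev->primary, O_RDWR);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	... exercise file->f_op or pass the file to uAPI-facing helpers ...
 *
 *	fput(file);
 */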

#ifdef CONFIG_MMU
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * drm_addr_inflate() attempts to construct an aligned area by inflating
 * the area size and skipping the unaligned start of the area.
 * Adapted from shmem_get_unmapped_area().
 */
static unsigned long drm_addr_inflate(unsigned long addr,
				      unsigned long len,
				      unsigned long pgoff,
				      unsigned long flags,
				      unsigned long huge_size)
{
	unsigned long offset, inflated_len;
	unsigned long inflated_addr;
	unsigned long inflated_offset;

	offset = (pgoff << PAGE_SHIFT) & (huge_size - 1);
	if (offset && offset + len < 2 * huge_size)
		return addr;
	if ((addr & (huge_size - 1)) == offset)
		return addr;

	inflated_len = len + huge_size - PAGE_SIZE;
	if (inflated_len > TASK_SIZE)
		return addr;
	if (inflated_len < len)
		return addr;

	inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len,
						       0, flags);
	if (IS_ERR_VALUE(inflated_addr))
		return addr;
	if (inflated_addr & ~PAGE_MASK)
		return addr;

	inflated_offset = inflated_addr & (huge_size - 1);
	inflated_addr += offset - inflated_offset;
	if (inflated_offset > offset)
		inflated_addr += huge_size;

	if (inflated_addr > TASK_SIZE - len)
		return addr;

	return inflated_addr;
}
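
/*
 * Worked example (hypothetical numbers): with huge_size = 2 MiB and a buffer
 * object that starts on a huge page boundary (offset = 0), suppose the
 * inflated search above returns inflated_addr = 0x7f1234567000. Then
 * inflated_offset = 0x7f1234567000 & 0x1fffff = 0x167000, so the address is
 * first pulled back to 0x7f1234400000 and, because inflated_offset > offset,
 * bumped up by one huge page to 0x7f1234600000: a 2 MiB aligned address that
 * still lies inside the inflated area.
 */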

/**
 * drm_get_unmapped_area() - Get an unused user-space virtual memory area
 * suitable for huge page table entries.
 * @file: The struct file representing the address space being mmap()'d.
 * @uaddr: Start address suggested by user-space.
 * @len: Length of the area.
 * @pgoff: The page offset into the address space.
 * @flags: mmap flags
 * @mgr: The address space manager used by the drm driver. This argument can
 * probably be removed at some point when all drivers use the same
 * address space manager.
 *
 * This function attempts to find an unused user-space virtual memory area
 * that can accommodate the size we want to map, and that is properly
 * aligned to facilitate huge page table entries matching actual
 * huge pages or huge page aligned memory in buffer objects. Buffer objects
 * are assumed to start at huge page boundary pfns (io memory) or be
 * populated by huge pages aligned to the start of the buffer object
 * (system- or coherent memory). Adapted from shmem_get_unmapped_area().
 *
 * Return: aligned user-space address.
 */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	unsigned long addr;
	unsigned long inflated_addr;
	struct drm_vma_offset_node *node;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/*
	 * @pgoff is the file page-offset, whose huge page boundaries
	 * typically align to physical address huge page boundaries.
	 * That's not true for DRM, however, where physical address huge
	 * page boundaries instead are aligned with the offset from
	 * buffer object start. So adjust @pgoff to be the offset from
	 * buffer object start.
	 */
	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, pgoff, 1);
	if (node)
		pgoff -= node->vm_node.start;
	drm_vma_offset_unlock_lookup(mgr);

	addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;
	if (addr & ~PAGE_MASK)
		return addr;
	if (addr > TASK_SIZE - len)
		return addr;

	if (len < HPAGE_PMD_SIZE)
		return addr;
	if (flags & MAP_FIXED)
		return addr;
	/*
	 * Our priority is to support MAP_SHARED mapped hugely;
	 * and support MAP_PRIVATE mapped hugely too, until it is COWed.
	 * But if caller specified an address hint, respect that as before.
	 */
	if (uaddr)
		return addr;

	inflated_addr = drm_addr_inflate(addr, len, pgoff, flags,
					 HPAGE_PMD_SIZE);

	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
	    len >= HPAGE_PUD_SIZE)
		inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff,
						 flags, HPAGE_PUD_SIZE);
	return inflated_addr;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
unsigned long drm_get_unmapped_area(struct file *file,
				    unsigned long uaddr, unsigned long len,
				    unsigned long pgoff, unsigned long flags,
				    struct drm_vma_offset_manager *mgr)
{
	return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
EXPORT_SYMBOL_GPL(drm_get_unmapped_area);
#endif /* CONFIG_MMU */
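
/*
 * Usage sketch (illustrative only): a driver wiring this into its
 * file_operations so that mmap() of buffer objects can be huge-page aligned.
 * The drm_foo_* names and the vma_manager field are assumed driver-specific.
 *
 *	static unsigned long drm_foo_get_unmapped_area(struct file *file,
 *						       unsigned long uaddr,
 *						       unsigned long len,
 *						       unsigned long pgoff,
 *						       unsigned long flags)
 *	{
 *		struct drm_file *file_priv = file->private_data;
 *		struct drm_foo_private *priv =
 *			to_foo_private(file_priv->minor->dev);
 *
 *		return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
 *					     &priv->vma_manager);
 *	}
 *
 *	static const struct file_operations drm_foo_fops = {
 *		.owner = THIS_MODULE,
 *		.mmap = drm_foo_mmap,
 *		.get_unmapped_area = drm_foo_get_unmapped_area,
 *		...
 *	};
 */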