/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init_base(&dev->object_name_idr, 1);

        vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
                                          GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
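
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * typically embeds struct drm_gem_object in its own buffer structure and
 * calls drm_gem_object_init() from its creation path. The names "my_bo" and
 * "my_bo_create" are hypothetical; @size must be page-aligned.
 *
 *        struct my_bo {
 *                struct drm_gem_object base;
 *        };
 *
 *        static int my_bo_create(struct drm_device *dev, size_t size,
 *                                struct my_bo **out)
 *        {
 *                struct my_bo *bo;
 *                int ret;
 *
 *                bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *                if (!bo)
 *                        return -ENOMEM;
 *
 *                ret = drm_gem_object_init(dev, &bo->base, size);
 *                if (ret) {
 *                        kfree(bo);
 *                        return ret;
 *                }
 *
 *                *out = bo;
 *                return 0;
 *        }
 */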

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;

        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
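
/*
 * Usage sketch (illustrative only): drivers whose buffers are not shmem
 * backed (for example VRAM-backed or imported dma-buf objects) use
 * drm_gem_private_object_init() and provide the backing store themselves.
 * "my_vram_bo" is a hypothetical structure.
 *
 *        struct my_vram_bo {
 *                struct drm_gem_object base;
 *                u64 vram_offset;
 *        };
 *
 *        static void my_vram_bo_init(struct drm_device *dev,
 *                                    struct my_vram_bo *bo, size_t size)
 *        {
 *                drm_gem_private_object_init(dev, &bo->base, size);
 *                // obj->filp stays NULL; the driver supplies the memory
 *        }
 */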

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
        /* Unbreak the reference cycle if we have an exported dma_buf. */
        if (obj->dma_buf) {
                dma_buf_put(obj->dma_buf);
                obj->dma_buf = NULL;
        }
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool final = false;

        if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
                drm_gem_object_exported_dma_buf_free(obj);
                final = true;
        }
        mutex_unlock(&dev->object_name_lock);

        if (final)
                drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        if (obj->funcs && obj->funcs->close)
                obj->funcs->close(obj, file_priv);
        else if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_prime_remove_buf_handle(&file_priv->prime, id);
        drm_vma_node_revoke(&obj->vma_node, file_priv);

        drm_gem_object_handle_put_unlocked(obj);

        return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_replace(&filp->object_idr, NULL, handle);
        spin_unlock(&filp->table_lock);
        if (IS_ERR_OR_NULL(obj))
                return -EINVAL;

        /* Release driver's reference and decrement refcount. */
        drm_gem_object_release_handle(handle, obj, filp);

        /* And finally make the handle available for future allocations. */
        spin_lock(&filp->table_lock);
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                            u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(file, handle);
        if (!obj)
                return -ENOENT;

        /* Don't allow imported objects to be mapped */
        if (obj->import_attach) {
                ret = -EINVAL;
                goto out;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
        drm_gem_object_put(obj);

        return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
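
/*
 * Usage sketch (illustrative only): drivers that want this behaviour simply
 * point the dumb-buffer callback in their &struct drm_driver at this helper;
 * "my_dumb_create" is a hypothetical driver function.
 *
 *        static struct drm_driver my_driver = {
 *                .driver_features = DRIVER_GEM | DRIVER_MODESET,
 *                .dumb_create     = my_dumb_create,
 *                .dumb_map_offset = drm_gem_dumb_map_offset,
 *        };
 */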

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        u32 handle;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
        if (obj->handle_count++ == 0)
                drm_gem_object_get(obj);

        /*
         * Get the user-visible handle using idr. Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();

        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        handle = ret;

        ret = drm_vma_node_allow(&obj->vma_node, file_priv);
        if (ret)
                goto err_remove;

        if (obj->funcs && obj->funcs->open) {
                ret = obj->funcs->open(obj, file_priv);
                if (ret)
                        goto err_revoke;
        } else if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        *handlep = handle;
        return 0;

err_revoke:
        drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, handle);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_put_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to drop
 * their own reference to the object (e.g. via drm_gem_object_put()) afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
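
/*
 * Usage sketch (illustrative only): a driver's buffer-creation ioctl or dumb
 * create callback usually creates the handle as its final step and then drops
 * its own reference; from then on the handle keeps the object alive.
 * "my_bo_create" is the hypothetical allocator sketched earlier in this file.
 *
 *        static int my_create_and_publish(struct drm_device *dev,
 *                                         struct drm_file *file_priv,
 *                                         size_t size, u32 *handle)
 *        {
 *                struct my_bo *bo;
 *                int ret;
 *
 *                ret = my_bo_create(dev, size, &bo);
 *                if (ret)
 *                        return ret;
 *
 *                ret = drm_gem_handle_create(file_priv, &bo->base, handle);
 *                // Drop the allocation reference; on failure this also frees
 *                // the object, on success the new handle holds a reference.
 *                drm_gem_object_put(&bo->base);
 *
 *                return ret;
 *        }
 */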

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/*
 * Move pages to the appropriate LRU list and release the pagevec, dropping the
 * reference count held on those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(); it cannot be used on objects that were set up with
 * drm_gem_private_object_init() only, since those have no shmem backing.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        struct pagevec pvec;
        int i, npages;

        if (WARN_ON(!obj->filp))
                return ERR_PTR(-EINVAL);

        /* This is the shared memory object that backs the GEM resource */
        mapping = obj->filp->f_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * the driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        mapping_set_unevictable(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        mapping_clear_unevictable(mapping);
        pagevec_init(&pvec);
        while (i--) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
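
/*
 * Usage sketch (illustrative only): pin the whole object, use the page array
 * (for instance to build an sg_table), then unpin with drm_gem_put_pages().
 *
 *        struct page **pages;
 *
 *        pages = drm_gem_get_pages(obj);
 *        if (IS_ERR(pages))
 *                return PTR_ERR(pages);
 *
 *        // ... use pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *
 *        drm_gem_put_pages(obj, pages, true, true);
 */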

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                       bool dirty, bool accessed)
{
        int i, npages;
        struct address_space *mapping;
        struct pagevec pvec;

        mapping = file_inode(obj->filp)->i_mapping;
        mapping_clear_unevictable(mapping);

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * the driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                if (!pages[i])
                        continue;

                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);

        kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
                          struct drm_gem_object **objs)
{
        int i, ret = 0;
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        for (i = 0; i < count; i++) {
                /* Check if we currently have a reference on the object */
                obj = idr_find(&filp->object_idr, handle[i]);
                if (!obj) {
                        ret = -ENOENT;
                        break;
                }
                drm_gem_object_get(obj);
                objs[i] = obj;
        }
        spin_unlock(&filp->table_lock);

        return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 *
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out)
{
        int ret;
        u32 *handles;
        struct drm_gem_object **objs;

        if (!count)
                return 0;

        objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
                              GFP_KERNEL | __GFP_ZERO);
        if (!objs)
                return -ENOMEM;

        *objs_out = objs;

        handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
        if (!handles) {
                ret = -ENOMEM;
                goto out;
        }

        if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
                ret = -EFAULT;
                DRM_DEBUG("Failed to copy in GEM handles\n");
                goto out;
        }

        ret = objects_lookup(filp, handles, count, objs);
out:
        kvfree(handles);
        return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
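
/*
 * Usage sketch (illustrative only): look up a user-supplied handle array,
 * use the objects, then drop every reference and free the array. The "args"
 * fields are hypothetical ioctl arguments; entries that were never filled in
 * stay NULL, so the same cleanup path also works when the lookup fails.
 *
 *        struct drm_gem_object **objs = NULL;
 *        int i, ret;
 *
 *        ret = drm_gem_objects_lookup(file_priv,
 *                                     u64_to_user_ptr(args->bo_handles),
 *                                     args->bo_count, &objs);
 *        if (ret)
 *                goto put;
 *
 *        // ... use objs[0 .. args->bo_count - 1] ...
 *
 * put:
 *        for (i = 0; objs && i < args->bo_count; i++)
 *                if (objs[i])
 *                        drm_gem_object_put(objs[i]);
 *        kvfree(objs);
 */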

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
        struct drm_gem_object *obj = NULL;

        objects_lookup(filp, &handle, 1, &obj);
        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a
 * GEM object's reservation object.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) bool wait_all, unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) long ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct drm_gem_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) obj = drm_gem_object_lookup(filep, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (!obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) true, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) ret = -ETIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) else if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) EXPORT_SYMBOL(drm_gem_dma_resv_wait);
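
/*
 * Usage sketch (hypothetical driver code): a BO_WAIT-style ioctl that waits
 * on all fences of a buffer. The args structure and its timeout_msec field
 * are assumptions made for illustration; msecs_to_jiffies() converts the
 * userspace timeout into the jiffies value this helper expects.
 *
 *	unsigned long timeout = msecs_to_jiffies(args->timeout_msec);
 *
 *	return drm_gem_dma_resv_wait(file_priv, args->handle, true, timeout);
 */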
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * @dev: drm_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * @data: ioctl data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * @file_priv: drm file-private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * Releases the handle to an mm object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) drm_gem_close_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct drm_gem_close *args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (!drm_core_check_feature(dev, DRIVER_GEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ret = drm_gem_handle_delete(file_priv, args->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * @dev: drm_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * @data: ioctl data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * @file_priv: drm file-private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * Create a global name for an object, returning the name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Note that the name does not hold a reference; when the object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * is freed, the name goes away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) drm_gem_flink_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct drm_gem_flink *args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct drm_gem_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (!drm_core_check_feature(dev, DRIVER_GEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) obj = drm_gem_object_lookup(file_priv, args->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (obj == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) mutex_lock(&dev->object_name_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /* prevent races with concurrent gem_close. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (obj->handle_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (!obj->name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) obj->name = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) args->name = (uint64_t) obj->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) mutex_unlock(&dev->object_name_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * @dev: drm_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * @data: ioctl data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * @file_priv: drm file-private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Open an object using the global name, returning a handle and the size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * This handle (of course) holds a reference to the object, so the object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * will not go away until the handle is deleted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) drm_gem_open_ioctl(struct drm_device *dev, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) struct drm_file *file_priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct drm_gem_open *args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct drm_gem_object *obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (!drm_core_check_feature(dev, DRIVER_GEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) mutex_lock(&dev->object_name_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) obj = idr_find(&dev->object_name_idr, (int) args->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) drm_gem_object_get(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) mutex_unlock(&dev->object_name_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) args->handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) args->size = obj->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
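
/*
 * Flow sketch (illustrative userspace, assuming libdrm's drmIoctl() wrapper):
 * process A publishes a global name for one of its handles with GEM_FLINK and
 * passes the 32-bit name to process B, which converts it back into a local
 * handle with GEM_OPEN.
 *
 *	struct drm_gem_flink flink = { .handle = handle_a };
 *	drmIoctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open open = { .name = flink.name };
 *	drmIoctl(fd_b, DRM_IOCTL_GEM_OPEN, &open);
 *	(open.handle and open.size are now valid in process B)
 */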
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * drm_gem_open - initializes GEM file-private structures at devnode open time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * @dev: drm_device which is being opened by userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @file_private: drm file-private structure to set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * Called at device open time, sets up the structure for handling refcounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * of mm objects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) idr_init_base(&file_private->object_idr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) spin_lock_init(&file_private->table_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * drm_gem_release - release file-private GEM resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * @dev: drm_device which is being closed by userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * @file_private: drm file-private structure to clean up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * Called at close time when the filp is going away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * Releases any remaining references on objects by this filp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) idr_for_each(&file_private->object_idr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) &drm_gem_object_release_handle, file_private);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) idr_destroy(&file_private->object_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * drm_gem_object_release - release GEM buffer object resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * @obj: GEM buffer object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * This releases any structures and resources used by @obj and is the inverse of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * drm_gem_object_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) drm_gem_object_release(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) WARN_ON(obj->dma_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) if (obj->filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) fput(obj->filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dma_resv_fini(&obj->_resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) drm_gem_free_mmap_offset(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) EXPORT_SYMBOL(drm_gem_object_release);
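
/*
 * Usage sketch (hypothetical driver): a free callback for a driver that
 * embeds struct drm_gem_object releases the core state first and then frees
 * its own allocation; struct my_bo and my_bo_free() are illustrative names.
 *
 *	static void my_bo_free(struct drm_gem_object *obj)
 *	{
 *		struct my_bo *bo = container_of(obj, struct my_bo, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */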
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) * drm_gem_object_free - free a GEM object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * @kref: kref of the object to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * Called after the last reference to the object has been lost.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * Frees the object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) drm_gem_object_free(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct drm_gem_object *obj =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) container_of(kref, struct drm_gem_object, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct drm_device *dev = obj->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (obj->funcs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) obj->funcs->free(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) else if (dev->driver->gem_free_object_unlocked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) dev->driver->gem_free_object_unlocked(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) EXPORT_SYMBOL(drm_gem_object_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * drm_gem_object_put_locked - release a GEM buffer object reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * @obj: GEM buffer object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * This releases a reference to @obj. Callers must hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * &drm_device.struct_mutex lock when calling this function, even when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * driver doesn't use &drm_device.struct_mutex for anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * For drivers not encumbered with legacy locking use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * drm_gem_object_put() instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) drm_gem_object_put_locked(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) kref_put(&obj->refcount, drm_gem_object_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) EXPORT_SYMBOL(drm_gem_object_put_locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * drm_gem_vm_open - vma->ops->open implementation for GEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * @vma: VM area structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * This function implements the #vm_operations_struct open() callback for GEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * drivers. This must be used together with drm_gem_vm_close().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) void drm_gem_vm_open(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct drm_gem_object *obj = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) drm_gem_object_get(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) EXPORT_SYMBOL(drm_gem_vm_open);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * drm_gem_vm_close - vma->ops->close implementation for GEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * @vma: VM area structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * This function implements the #vm_operations_struct close() callback for GEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * drivers. This must be used together with drm_gem_vm_open().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) void drm_gem_vm_close(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct drm_gem_object *obj = vma->vm_private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) EXPORT_SYMBOL(drm_gem_vm_close);
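
/*
 * Usage sketch (hypothetical driver): hook the two helpers above into a
 * driver's vm_operations_struct so that every mapping holds its own object
 * reference; my_gem_fault() is an illustrative fault handler.
 *
 *	static const struct vm_operations_struct my_gem_vm_ops = {
 *		.fault = my_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */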
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * drm_gem_mmap_obj - memory map a GEM object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * @obj: the GEM object to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * @obj_size: the object size to be mapped, in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * @vma: VMA for the area to be mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * provided by the driver. Depending on their requirements, drivers can either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * provide a fault handler in their gem_vm_ops (in which case any accesses to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * the object will be trapped, to perform migration, GTT binding, surface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * register allocation, or performance monitoring), or mmap the buffer memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * synchronously after calling drm_gem_mmap_obj.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * This function is mainly intended to implement the DMABUF mmap operation, when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * the GEM object is not looked up based on its fake offset. To implement the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * DRM mmap operation, drivers should use the drm_gem_mmap() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * callers must verify access restrictions before calling this helper.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * size, or if no gem_vm_ops are provided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) struct drm_device *dev = obj->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) /* Check for valid size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (obj_size < vma->vm_end - vma->vm_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* Take a ref for this mapping of the object, so that the fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * handler can dereference the mmap offset's pointer to the object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * This reference is cleaned up by the corresponding vm_close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * (which should happen whether the vma was created by this call, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * by a vm_open due to mremap or partial unmap or whatever).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) drm_gem_object_get(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) vma->vm_private_data = obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (obj->funcs && obj->funcs->mmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ret = obj->funcs->mmap(obj, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (obj->funcs && obj->funcs->vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) vma->vm_ops = obj->funcs->vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) else if (dev->driver->gem_vm_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) vma->vm_ops = dev->driver->gem_vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) EXPORT_SYMBOL(drm_gem_mmap_obj);
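
/*
 * Usage sketch (hypothetical driver): implementing the dma-buf mmap path by
 * delegating to drm_gem_mmap_obj(), which is the intended use when the object
 * is not looked up through its fake offset; my_gem_prime_mmap() is an
 * illustrative name.
 *
 *	static int my_gem_prime_mmap(struct drm_gem_object *obj,
 *				     struct vm_area_struct *vma)
 *	{
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */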
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * drm_gem_mmap - memory map routine for GEM objects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * @filp: DRM file pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * @vma: VMA for the area to be mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * If a driver supports GEM object mapping, mmap calls on the DRM file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * descriptor will end up here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * contain the fake offset we created when the GTT map ioctl was called on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * the object) and map it with a call to drm_gem_mmap_obj().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * If the caller is not granted access to the buffer object, the mmap will fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * with EACCES. Please see the vma manager for more information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct drm_file *priv = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) struct drm_device *dev = priv->minor->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) struct drm_gem_object *obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) struct drm_vma_offset_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (drm_dev_is_unplugged(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) drm_vma_offset_lock_lookup(dev->vma_offset_manager);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) vma->vm_pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) vma_pages(vma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) if (likely(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) obj = container_of(node, struct drm_gem_object, vma_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * When the object is being freed, after it hits 0-refcnt it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) * proceeds to tear down the object. In the process it will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * attempt to remove the VMA offset and so acquire this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * that matches our range, we know it is in the process of being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * destroyed and will be freed as soon as we release the lock -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * so we have to check for the 0-refcnted object and treat it as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (!kref_get_unless_zero(&obj->refcount))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (!obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) if (!drm_vma_node_is_allowed(node, priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (node->readonly) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) if (vma->vm_flags & VM_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) vma->vm_flags &= ~VM_MAYWRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) drm_gem_object_put(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) EXPORT_SYMBOL(drm_gem_mmap);
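
/*
 * Usage sketch: wiring drm_gem_mmap() into a driver's file_operations; the
 * DEFINE_DRM_GEM_FOPS() helper in <drm/drm_gem.h> expands to roughly this.
 *
 *	static const struct file_operations my_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *		.poll = drm_poll,
 *		.read = drm_read,
 *	};
 */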
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) const struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) drm_printf_indent(p, indent, "name=%d\n", obj->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) drm_printf_indent(p, indent, "refcount=%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) kref_read(&obj->refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) drm_printf_indent(p, indent, "start=%08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) drm_vma_node_start(&obj->vma_node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) drm_printf_indent(p, indent, "size=%zu\n", obj->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) drm_printf_indent(p, indent, "imported=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) obj->import_attach ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (obj->funcs && obj->funcs->print_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) obj->funcs->print_info(p, indent, obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int drm_gem_pin(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (obj->funcs && obj->funcs->pin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return obj->funcs->pin(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) else if (obj->dev->driver->gem_prime_pin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return obj->dev->driver->gem_prime_pin(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) void drm_gem_unpin(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (obj->funcs && obj->funcs->unpin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) obj->funcs->unpin(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) else if (obj->dev->driver->gem_prime_unpin)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) obj->dev->driver->gem_prime_unpin(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) void *drm_gem_vmap(struct drm_gem_object *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (obj->funcs && obj->funcs->vmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) vaddr = obj->funcs->vmap(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) else if (obj->dev->driver->gem_prime_vmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) vaddr = obj->dev->driver->gem_prime_vmap(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) vaddr = ERR_PTR(-EOPNOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) if (!vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) vaddr = ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (!vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) if (obj->funcs && obj->funcs->vunmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) obj->funcs->vunmap(obj, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) else if (obj->dev->driver->gem_prime_vunmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) obj->dev->driver->gem_prime_vunmap(obj, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * drm_gem_lock_reservations - Sets up the ww context and acquires
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * the lock on an array of GEM objects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * Once you've locked your reservations, you'll want to set up space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * for your shared fences (if applicable), submit your job, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * drm_gem_unlock_reservations().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * @objs: drm_gem_objects to lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * @count: Number of objects in @objs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * part of tracking this set of locked reservations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct ww_acquire_ctx *acquire_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) int contended = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) ww_acquire_init(acquire_ctx, &reservation_ww_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (contended != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct drm_gem_object *obj = objs[contended];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) ret = dma_resv_lock_slow_interruptible(obj->resv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) ww_acquire_done(acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (i == contended)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ret = dma_resv_lock_interruptible(objs[i]->resv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) dma_resv_unlock(objs[j]->resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (contended != -1 && contended >= i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) dma_resv_unlock(objs[contended]->resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (ret == -EDEADLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) contended = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ww_acquire_done(acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ww_acquire_done(acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) EXPORT_SYMBOL(drm_gem_lock_reservations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct ww_acquire_ctx *acquire_ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) for (i = 0; i < count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) dma_resv_unlock(objs[i]->resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) ww_acquire_fini(acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) EXPORT_SYMBOL(drm_gem_unlock_reservations);
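
/*
 * Usage sketch (hypothetical submit path) following the sequence described in
 * the drm_gem_lock_reservations() comment: lock every reservation, attach the
 * job's fence, then unlock; objs, count and job_fence are assumptions made
 * for illustration.
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < count; i++)
 *		dma_resv_add_excl_fence(objs[i]->resv, job_fence);
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */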
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * drm_gem_fence_array_add - Adds the fence to an array of fences to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * waited on, deduplicating fences from the same context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) * @fence_array: array of dma_fence * for the job to block on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) * @fence: the dma_fence to add to the list of dependencies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * 0 on success, or an error on failing to expand the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int drm_gem_fence_array_add(struct xarray *fence_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) struct dma_fence *fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) struct dma_fence *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) unsigned long index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) u32 id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (!fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* Deduplicate if we already depend on a fence from the same context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * This lets the size of the array of deps scale with the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * engines involved, rather than the number of BOs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) xa_for_each(fence_array, index, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (entry->context != fence->context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (dma_fence_is_later(fence, entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) dma_fence_put(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) xa_store(fence_array, index, fence, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) dma_fence_put(fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) dma_fence_put(fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) EXPORT_SYMBOL(drm_gem_fence_array_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) * in the GEM object's reservation object to an array of dma_fences for use in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * scheduling a rendering job.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * This should be called after drm_gem_lock_reservations() on your array of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * GEM objects used in the job but before updating the reservations with your
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * own fences.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * @fence_array: array of dma_fence * for the job to block on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * @obj: the gem object to add new dependencies from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * @write: whether the job might write the object (so we need to depend on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * shared fences in the reservation object).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct drm_gem_object *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) bool write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct dma_fence **fences;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) unsigned int i, fence_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (!write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) struct dma_fence *fence =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) dma_resv_get_excl_rcu(obj->resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return drm_gem_fence_array_add(fence_array, fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) ret = dma_resv_get_fences_rcu(obj->resv, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) &fence_count, &fences);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (ret || !fence_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) for (i = 0; i < fence_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) ret = drm_gem_fence_array_add(fence_array, fences[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) for (; i < fence_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) dma_fence_put(fences[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) kfree(fences);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
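
/*
 * Usage sketch (hypothetical scheduler-style submit): collect the implicit
 * dependencies of every BO into an xarray of fences before pushing the job;
 * job->deps is assumed to have been initialised with
 * xa_init_flags(&job->deps, XA_FLAGS_ALLOC).
 *
 *	for (i = 0; i < job->bo_count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps,
 *						       job->bos[i], job->write);
 *		if (ret)
 *			return ret;
 *	}
 */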