/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * Copyright 2016 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_DRV_H_
#define _DRM_DRV_H_

#include <linux/list.h>
#include <linux/irqreturn.h>
#include <linux/uuid.h>

#include <drm/drm_device.h>

struct drm_file;
struct drm_gem_object;
struct drm_master;
struct drm_minor;
struct dma_buf_attachment;
struct drm_display_mode;
struct drm_mode_create_dumb;
struct drm_printer;

/**
 * enum drm_driver_feature - feature flags
 *
 * See &drm_driver.driver_features, &drm_device.driver_features and
 * drm_core_check_feature().
 */
enum drm_driver_feature {
	/**
	 * @DRIVER_GEM:
	 *
	 * Driver uses the GEM memory manager. This should be set for all modern
	 * drivers.
	 */
	DRIVER_GEM = BIT(0),
	/**
	 * @DRIVER_MODESET:
	 *
	 * Driver supports mode setting interfaces (KMS).
	 */
	DRIVER_MODESET = BIT(1),
	/**
	 * @DRIVER_RENDER:
	 *
	 * Driver supports dedicated render nodes. See also the :ref:`section on
	 * render nodes <drm_render_node>` for details.
	 */
	DRIVER_RENDER = BIT(3),
	/**
	 * @DRIVER_ATOMIC:
	 *
	 * Driver supports the full atomic modesetting userspace API. Drivers
	 * which only use atomic internally, but do not support the full
	 * userspace API (e.g. not all properties converted to atomic, or
	 * multi-plane updates are not guaranteed to be tear-free) should not
	 * set this flag.
	 */
	DRIVER_ATOMIC = BIT(4),
	/**
	 * @DRIVER_SYNCOBJ:
	 *
	 * Driver supports &drm_syncobj for explicit synchronization of command
	 * submission.
	 */
	DRIVER_SYNCOBJ = BIT(5),
	/**
	 * @DRIVER_SYNCOBJ_TIMELINE:
	 *
	 * Driver supports the timeline flavor of &drm_syncobj for explicit
	 * synchronization of command submission.
	 */
	DRIVER_SYNCOBJ_TIMELINE = BIT(6),

	/* IMPORTANT: Below are all the legacy flags, add new ones above. */

	/**
	 * @DRIVER_USE_AGP:
	 *
	 * Set up DRM AGP support, see drm_agp_init(); the DRM core will manage
	 * AGP resources. New drivers don't need this.
	 */
	DRIVER_USE_AGP = BIT(25),
	/**
	 * @DRIVER_LEGACY:
	 *
	 * Denote a legacy driver using shadow attach. Do not use.
	 */
	DRIVER_LEGACY = BIT(26),
	/**
	 * @DRIVER_PCI_DMA:
	 *
	 * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace
	 * will be enabled. Only for legacy drivers. Do not use.
	 */
	DRIVER_PCI_DMA = BIT(27),
	/**
	 * @DRIVER_SG:
	 *
	 * Driver can perform scatter/gather DMA, allocation and mapping of
	 * scatter/gather buffers will be enabled. Only for legacy drivers. Do
	 * not use.
	 */
	DRIVER_SG = BIT(28),

	/**
	 * @DRIVER_HAVE_DMA:
	 *
	 * Driver supports DMA, the userspace DMA API will be supported. Only
	 * for legacy drivers. Do not use.
	 */
	DRIVER_HAVE_DMA = BIT(29),
	/**
	 * @DRIVER_HAVE_IRQ:
	 *
	 * Legacy irq support. Only for legacy drivers. Do not use.
	 *
	 * New drivers can either use the drm_irq_install() and
	 * drm_irq_uninstall() helper functions, or roll their own irq support
	 * code by calling request_irq() directly.
	 */
	DRIVER_HAVE_IRQ = BIT(30),
	/**
	 * @DRIVER_KMS_LEGACY_CONTEXT:
	 *
	 * Used only by nouveau for backwards compatibility with existing
	 * userspace. Do not use.
	 */
	DRIVER_KMS_LEGACY_CONTEXT = BIT(31),
};
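
/*
 * Illustration only (not part of this header, the "foo" names are
 * hypothetical): a modern KMS+GEM driver advertises the features it supports
 * by OR-ing the flags above into &drm_driver.driver_features.
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		.name = "foo",
 *	};
 */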

/**
 * struct drm_driver - DRM driver structure
 *
 * This structure represents the common code for a family of cards. There will
 * be one &struct drm_device for each card present in this family. It contains
 * lots of vfunc entries, and a pile of those probably should be moved to more
 * appropriate places like &drm_mode_config_funcs or into a new operations
 * structure for GEM drivers.
 */
struct drm_driver {
	/**
	 * @load:
	 *
	 * Backward-compatible driver callback to complete initialization steps
	 * after the driver is registered. For this reason, it may suffer from
	 * race conditions and its use is deprecated for new drivers. It is
	 * therefore only supported for existing drivers not yet converted to
	 * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for
	 * the proper and race-free way to set up a &struct drm_device.
	 *
	 * This is deprecated, do not use!
	 *
	 * Returns:
	 *
	 * Zero on success, non-zero value on failure.
	 */
	int (*load) (struct drm_device *, unsigned long flags);

	/**
	 * @open:
	 *
	 * Driver callback when a new &struct drm_file is opened. Useful for
	 * setting up driver-private data structures like buffer allocators,
	 * execution contexts or similar things. Such driver-private resources
	 * must be released again in @postclose.
	 *
	 * Since the display/modeset side of DRM can only be owned by exactly
	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
	 * there should never be a need to set up any modeset related resources
	 * in this callback. Doing so would be a driver design bug.
	 *
	 * Returns:
	 *
	 * 0 on success, a negative error code on failure, which will be
	 * returned to userspace as the result of the open() system call.
	 */
	int (*open) (struct drm_device *, struct drm_file *);

	/**
	 * @postclose:
	 *
	 * One of the driver callbacks when a new &struct drm_file is closed.
	 * Useful for tearing down driver-private data structures allocated in
	 * @open like buffer allocators, execution contexts or similar things.
	 *
	 * Since the display/modeset side of DRM can only be owned by exactly
	 * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
	 * there should never be a need to tear down any modeset related
	 * resources in this callback. Doing so would be a driver design bug.
	 */
	void (*postclose) (struct drm_device *, struct drm_file *);
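
	/*
	 * Illustrative sketch (not part of this header, names are
	 * hypothetical): a driver pairing @open and @postclose to manage
	 * per-file private data might look roughly like this.
	 *
	 *	static int foo_open(struct drm_device *dev, struct drm_file *file)
	 *	{
	 *		struct foo_file_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	 *
	 *		if (!priv)
	 *			return -ENOMEM;
	 *		file->driver_priv = priv;
	 *		return 0;
	 *	}
	 *
	 *	static void foo_postclose(struct drm_device *dev, struct drm_file *file)
	 *	{
	 *		kfree(file->driver_priv);
	 *	}
	 */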

	/**
	 * @lastclose:
	 *
	 * Called when the last &struct drm_file has been closed and there's
	 * currently no userspace client for the &struct drm_device.
	 *
	 * Modern drivers should only use this to force-restore the fbdev
	 * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked(), or to
	 * execute delayed power switching state changes, e.g. in conjunction
	 * with the :ref:`vga_switcheroo` infrastructure. Anything else would
	 * indicate there's something seriously wrong.
	 *
	 * This is called after the @postclose hook has been called.
	 *
	 * NOTE:
	 *
	 * All legacy drivers use this callback to de-initialize the hardware.
	 * This is purely because of the shadow-attach model, where the DRM
	 * kernel driver does not really own the hardware. Instead ownership is
	 * handled with the help of userspace through an inherently racy dance
	 * to set/unset the VT into raw mode.
	 *
	 * Legacy drivers initialize the hardware in the @firstopen callback,
	 * which isn't even called for modern drivers.
	 */
	void (*lastclose) (struct drm_device *);
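
	/*
	 * Hedged example (not part of this header): for a driver using the
	 * fbdev helpers, @lastclose usually just restores the fbdev mode. It
	 * is assumed here that the driver set up &drm_device.fb_helper
	 * elsewhere; the "foo" name is hypothetical.
	 *
	 *	static void foo_lastclose(struct drm_device *dev)
	 *	{
	 *		drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
	 *	}
	 */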

	/**
	 * @unload:
	 *
	 * Reverse the effects of the driver load callback. Ideally,
	 * the clean up performed by the driver should happen in the
	 * reverse order of the initialization. Similarly to the load
	 * hook, this handler is deprecated and its usage should be
	 * dropped in favor of an open-coded teardown function at the
	 * driver layer. See drm_dev_unregister() and drm_dev_put()
	 * for the proper way to remove a &struct drm_device.
	 *
	 * The unload() hook is called right after unregistering
	 * the device.
	 */
	void (*unload) (struct drm_device *);

	/**
	 * @release:
	 *
	 * Optional callback for destroying device data after the final
	 * reference is released, i.e. the device is being destroyed.
	 *
	 * This is deprecated, clean up all memory allocations associated with a
	 * &drm_device using drmm_add_action(), drmm_kmalloc() and related
	 * managed resources functions.
	 */
	void (*release) (struct drm_device *);

	/**
	 * @irq_handler:
	 *
	 * Interrupt handler called when using drm_irq_install(). Not used by
	 * drivers which implement their own interrupt handling.
	 */
	irqreturn_t (*irq_handler) (int irq, void *arg);

	/**
	 * @irq_preinstall:
	 *
	 * Optional callback used by drm_irq_install() which is called before
	 * the interrupt handler is registered. This should be used to clear out
	 * any pending interrupts (e.g. left enabled by firmware based drivers)
	 * and reset the interrupt handling registers.
	 */
	void (*irq_preinstall) (struct drm_device *dev);

	/**
	 * @irq_postinstall:
	 *
	 * Optional callback used by drm_irq_install() which is called after
	 * the interrupt handler is registered. This should be used to enable
	 * interrupt generation in the hardware.
	 */
	int (*irq_postinstall) (struct drm_device *dev);

	/**
	 * @irq_uninstall:
	 *
	 * Optional callback used by drm_irq_uninstall() which is called before
	 * the interrupt handler is unregistered. This should be used to disable
	 * interrupt generation in the hardware.
	 */
	void (*irq_uninstall) (struct drm_device *dev);

	/**
	 * @master_set:
	 *
	 * Called whenever the minor master is set. Only used by vmwgfx.
	 */
	void (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
			   bool from_open);
	/**
	 * @master_drop:
	 *
	 * Called whenever the minor master is dropped. Only used by vmwgfx.
	 */
	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);

	/**
	 * @debugfs_init:
	 *
	 * Allows drivers to create driver-specific debugfs files.
	 */
	void (*debugfs_init)(struct drm_minor *minor);
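
	/*
	 * Minimal sketch, for illustration only: driver-specific debugfs files
	 * are created under &drm_minor.debugfs_root. The "foo" names and the
	 * foo_state_fops file operations are hypothetical.
	 *
	 *	static void foo_debugfs_init(struct drm_minor *minor)
	 *	{
	 *		debugfs_create_file("foo_state", 0444, minor->debugfs_root,
	 *				    minor->dev, &foo_state_fops);
	 *	}
	 */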

	/**
	 * @gem_free_object_unlocked: deconstructor for drm_gem_objects
	 *
	 * This is deprecated and should not be used by new drivers. Use
	 * &drm_gem_object_funcs.free instead.
	 */
	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);

	/**
	 * @gem_open_object:
	 *
	 * This callback is deprecated in favour of &drm_gem_object_funcs.open.
	 *
	 * Driver hook called upon GEM handle creation.
	 */
	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);

	/**
	 * @gem_close_object:
	 *
	 * This callback is deprecated in favour of &drm_gem_object_funcs.close.
	 *
	 * Driver hook called upon GEM handle release.
	 */
	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);

	/**
	 * @gem_create_object: constructor for gem objects
	 *
	 * Hook for allocating the GEM object struct, for use by the CMA and
	 * SHMEM GEM helpers.
	 */
	struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
						    size_t size);
	/**
	 * @prime_handle_to_fd:
	 *
	 * Main PRIME export function. Should be implemented with
	 * drm_gem_prime_handle_to_fd() for GEM based drivers.
	 *
	 * For an in-depth discussion see :ref:`PRIME buffer sharing
	 * documentation <prime_buffer_sharing>`.
	 */
	int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags, int *prime_fd);
	/**
	 * @prime_fd_to_handle:
	 *
	 * Main PRIME import function. Should be implemented with
	 * drm_gem_prime_fd_to_handle() for GEM based drivers.
	 *
	 * For an in-depth discussion see :ref:`PRIME buffer sharing
	 * documentation <prime_buffer_sharing>`.
	 */
	int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
				  int prime_fd, uint32_t *handle);
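
	/*
	 * For GEM based drivers both PRIME entry points are normally just
	 * wired up to the core helpers; a sketch of the corresponding
	 * initializer entries:
	 *
	 *	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	 *	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	 */
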
	/**
	 * @gem_prime_export:
	 *
	 * Export hook for GEM drivers. Deprecated in favour of
	 * &drm_gem_object_funcs.export.
	 */
	struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
					     int flags);
	/**
	 * @gem_prime_import:
	 *
	 * Import hook for GEM drivers.
	 *
	 * This defaults to drm_gem_prime_import() if not set.
	 */
	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
						    struct dma_buf *dma_buf);

	/**
	 * @gem_prime_pin:
	 *
	 * Deprecated hook in favour of &drm_gem_object_funcs.pin.
	 */
	int (*gem_prime_pin)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_unpin:
	 *
	 * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
	 */
	void (*gem_prime_unpin)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_get_sg_table:
	 *
	 * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
	 */
	struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_import_sg_table:
	 *
	 * Optional hook used by the PRIME helper functions
	 * drm_gem_prime_import() and drm_gem_prime_import_dev().
	 */
	struct drm_gem_object *(*gem_prime_import_sg_table)(
				struct drm_device *dev,
				struct dma_buf_attachment *attach,
				struct sg_table *sgt);
	/**
	 * @gem_prime_vmap:
	 *
	 * Deprecated vmap hook for GEM drivers. Please use
	 * &drm_gem_object_funcs.vmap instead.
	 */
	void *(*gem_prime_vmap)(struct drm_gem_object *obj);

	/**
	 * @gem_prime_vunmap:
	 *
	 * Deprecated vunmap hook for GEM drivers. Please use
	 * &drm_gem_object_funcs.vunmap instead.
	 */
	void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);

	/**
	 * @gem_prime_mmap:
	 *
	 * mmap hook for GEM drivers, used to implement dma-buf mmap in the
	 * PRIME helpers.
	 *
	 * FIXME: There's way too much duplication going on here, and also moved
	 * to &drm_gem_object_funcs.
	 */
	int (*gem_prime_mmap)(struct drm_gem_object *obj,
			      struct vm_area_struct *vma);

	/**
	 * @gem_prime_get_uuid:
	 *
	 * get_uuid hook for GEM drivers. Retrieves the virtio uuid of the
	 * given GEM buffer.
	 */
	int (*gem_prime_get_uuid)(struct drm_gem_object *obj,
				  uuid_t *uuid);

	/**
	 * @dumb_create:
	 *
	 * This creates a new dumb buffer in the driver's backing storage
	 * manager (GEM, TTM or something else entirely) and returns the
	 * resulting buffer handle. This handle can then be wrapped up into a
	 * framebuffer modeset object.
	 *
	 * Note that userspace is not allowed to use such objects for render
	 * acceleration - drivers must create their own private ioctls for such
	 * a use case.
	 *
	 * Width, height and depth are specified in the &drm_mode_create_dumb
	 * argument. The callback needs to fill the handle, pitch and size for
	 * the created buffer.
	 *
	 * Called by the user via ioctl.
	 *
	 * Returns:
	 *
	 * Zero on success, negative errno on failure.
	 */
	int (*dumb_create)(struct drm_file *file_priv,
			   struct drm_device *dev,
			   struct drm_mode_create_dumb *args);
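
	/*
	 * Hedged sketch of a @dumb_create implementation (the "foo" helper is
	 * hypothetical): compute pitch and size from the ioctl arguments,
	 * create a buffer object and return its handle. Drivers built on the
	 * CMA helpers can typically just use drm_gem_cma_dumb_create() here.
	 *
	 *	static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
	 *				   struct drm_mode_create_dumb *args)
	 *	{
	 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	 *		args->size = (u64)args->pitch * args->height;
	 *		return foo_gem_create_with_handle(file, dev, args->size,
	 *						  &args->handle);
	 *	}
	 */
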
	/**
	 * @dumb_map_offset:
	 *
	 * Allocate an offset in the drm device node's address space to be able
	 * to memory map a dumb buffer.
	 *
	 * The default implementation is drm_gem_create_mmap_offset(). GEM based
	 * drivers must not overwrite this.
	 *
	 * Called by the user via ioctl.
	 *
	 * Returns:
	 *
	 * Zero on success, negative errno on failure.
	 */
	int (*dumb_map_offset)(struct drm_file *file_priv,
			       struct drm_device *dev, uint32_t handle,
			       uint64_t *offset);
	/**
	 * @dumb_destroy:
	 *
	 * This destroys the userspace handle for the given dumb backing
	 * storage buffer. Since buffer objects must be reference counted in
	 * the kernel a buffer object won't be immediately freed if a
	 * framebuffer modeset object still uses it.
	 *
	 * Called by the user via ioctl.
	 *
	 * The default implementation is drm_gem_dumb_destroy(). GEM based
	 * drivers must not overwrite this.
	 *
	 * Returns:
	 *
	 * Zero on success, negative errno on failure.
	 */
	int (*dumb_destroy)(struct drm_file *file_priv,
			    struct drm_device *dev,
			    uint32_t handle);

	/**
	 * @gem_vm_ops: Driver private ops for this object
	 *
	 * For GEM drivers this is deprecated in favour of
	 * &drm_gem_object_funcs.vm_ops.
	 */
	const struct vm_operations_struct *gem_vm_ops;

	/** @major: driver major number */
	int major;
	/** @minor: driver minor number */
	int minor;
	/** @patchlevel: driver patch level */
	int patchlevel;
	/** @name: driver name */
	char *name;
	/** @desc: driver description */
	char *desc;
	/** @date: driver date */
	char *date;

	/**
	 * @driver_features:
	 * Driver features, see &enum drm_driver_feature. Drivers can disable
	 * some features on a per-instance basis using
	 * &drm_device.driver_features.
	 */
	u32 driver_features;

	/**
	 * @ioctls:
	 *
	 * Array of driver-private IOCTL description entries. See the chapter on
	 * :ref:`IOCTL support in the userland interfaces
	 * chapter<drm_driver_ioctl>` for the full details.
	 */

	const struct drm_ioctl_desc *ioctls;
	/** @num_ioctls: Number of entries in @ioctls. */
	int num_ioctls;
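
	/*
	 * Sketch only (driver names hypothetical, the FOO_SUBMIT ioctl is
	 * assumed to be defined in the driver's uapi header): driver-private
	 * IOCTLs are declared with DRM_IOCTL_DEF_DRV() and hooked up through
	 * @ioctls and @num_ioctls.
	 *
	 *	static const struct drm_ioctl_desc foo_ioctls[] = {
	 *		DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
	 *				  DRM_RENDER_ALLOW),
	 *	};
	 *
	 *	...
	 *	.ioctls = foo_ioctls,
	 *	.num_ioctls = ARRAY_SIZE(foo_ioctls),
	 */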

	/**
	 * @fops:
	 *
	 * File operations for the DRM device node. See the discussion in
	 * :ref:`file operations<drm_driver_fops>` for in-depth coverage and
	 * some examples.
	 */
	const struct file_operations *fops;
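
	/*
	 * A minimal sketch, assuming a GEM based driver (the "foo" names are
	 * hypothetical): the fops are usually generated with the
	 * DEFINE_DRM_GEM_FOPS() helper and referenced from here.
	 *
	 *	DEFINE_DRM_GEM_FOPS(foo_fops);
	 *
	 *	static struct drm_driver foo_driver = {
	 *		...
	 *		.fops = &foo_fops,
	 *	};
	 */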

	/* Everything below here is for legacy drivers, never use! */
	/* private: */

	/* List of devices hanging off this driver with stealth attach. */
	struct list_head legacy_dev_list;
	int (*firstopen) (struct drm_device *);
	void (*preclose) (struct drm_device *, struct drm_file *file_priv);
	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
	int (*dma_quiescent) (struct drm_device *);
	int (*context_dtor) (struct drm_device *dev, int context);
	u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
	int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
	void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
	int dev_priv_size;
};

void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
			   size_t size, size_t offset);

/**
 * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance
 * @parent: Parent device object
 * @driver: DRM driver
 * @type: the type of the struct which contains struct &drm_device
 * @member: the name of the &drm_device within @type.
 *
 * This allocates and initializes a new DRM device. No device registration is
 * done. Call drm_dev_register() to advertise the device to user space and
 * register it with other core subsystems. This should be done last in the
 * device initialization sequence to make sure userspace can't access an
 * inconsistent state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_get() and
 * drm_dev_put() to take and drop further ref-counts.
 *
 * It is recommended that drivers embed &struct drm_device into their own device
 * structure.
 *
 * Note that this manages the lifetime of the resulting &drm_device
 * automatically using devres. The DRM device initialized with this function is
 * automatically put on driver detach using drm_dev_put().
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
#define devm_drm_dev_alloc(parent, driver, type, member) \
	((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \
				       offsetof(type, member)))
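
/*
 * Usage sketch, for illustration only (the "foo" names are hypothetical): a
 * driver embedding &struct drm_device would typically allocate and register
 * it from its bus probe function roughly like this.
 *
 *	struct foo_device {
 *		struct drm_device drm;
 *		...
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_device *foo;
 *
 *		foo = devm_drm_dev_alloc(&pdev->dev, &foo_driver,
 *					 struct foo_device, drm);
 *		if (IS_ERR(foo))
 *			return PTR_ERR(foo);
 *
 *		... set up hardware and modesetting here ...
 *
 *		return drm_dev_register(&foo->drm, 0);
 *	}
 */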

struct drm_device *drm_dev_alloc(struct drm_driver *driver,
				 struct device *parent);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);

void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);

/**
 * drm_dev_is_unplugged - is a DRM device unplugged
 * @dev: DRM device
 *
 * This function can be called to check whether a hotpluggable device is
 * unplugged. Unplugging itself is signalled through drm_dev_unplug(). If a
 * device is unplugged, these two functions guarantee that any store before
 * calling drm_dev_unplug() is visible to callers of this function after it
 * completes.
 *
 * WARNING: This function fundamentally races against drm_dev_unplug(). It is
 * recommended that drivers instead use the underlying drm_dev_enter() and
 * drm_dev_exit() function pairs.
 */
static inline bool drm_dev_is_unplugged(struct drm_device *dev)
{
	int idx;

	if (drm_dev_enter(dev, &idx)) {
		drm_dev_exit(idx);
		return false;
	}

	return true;
}
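
/*
 * A minimal sketch of the recommended drm_dev_enter()/drm_dev_exit() pattern
 * (the "foo" structure and its mmio mapping are hypothetical): hardware
 * access paths bail out once the device is gone instead of racing against
 * drm_dev_unplug().
 *
 *	static int foo_read_reg(struct foo_device *foo, u32 reg, u32 *val)
 *	{
 *		int idx;
 *
 *		if (!drm_dev_enter(&foo->drm, &idx))
 *			return -ENODEV;
 *
 *		*val = readl(foo->mmio + reg);
 *
 *		drm_dev_exit(idx);
 *		return 0;
 *	}
 */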

/**
 * drm_core_check_all_features - check driver feature flags mask
 * @dev: DRM device to check
 * @features: feature flag(s) mask
 *
 * This checks @dev for driver features, see &drm_driver.driver_features,
 * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
 *
 * Returns true if all features in the @features mask are supported, false
 * otherwise.
 */
static inline bool drm_core_check_all_features(const struct drm_device *dev,
					       u32 features)
{
	u32 supported = dev->driver->driver_features & dev->driver_features;

	return features && (supported & features) == features;
}

/**
 * drm_core_check_feature - check driver feature flags
 * @dev: DRM device to check
 * @feature: feature flag
 *
 * This checks @dev for driver features, see &drm_driver.driver_features,
 * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
 *
 * Returns true if the @feature is supported, false otherwise.
 */
static inline bool drm_core_check_feature(const struct drm_device *dev,
					  enum drm_driver_feature feature)
{
	return drm_core_check_all_features(dev, feature);
}
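
/*
 * Typical usage, for illustration only: code shared across drivers guards
 * optional functionality with a feature check and bails out if the feature is
 * missing, e.g.
 *
 *	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 *		return -EOPNOTSUPP;
 */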

/**
 * drm_drv_uses_atomic_modeset - check if the driver implements
 * atomic_commit()
 * @dev: DRM device
 *
 * This check is useful if drivers do not have DRIVER_ATOMIC set but
 * have atomic modesetting internally implemented.
 */
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
	return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
		(dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
}


int drm_dev_set_unique(struct drm_device *dev, const char *name);


#endif