^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) #ifndef _DRM_DEVICE_H_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #define _DRM_DEVICE_H_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/kref.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/idr.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <drm/drm_hashtab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <drm/drm_mode_config.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) struct drm_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) struct drm_minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) struct drm_master;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) struct drm_device_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) struct drm_vblank_crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) struct drm_sg_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) struct drm_local_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) struct drm_vma_offset_manager;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) struct drm_vram_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) struct drm_fb_helper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) struct inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) struct pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct pci_controller;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
/**
 * enum switch_power_state - power state of drm device
 */
enum switch_power_state {
	/** @DRM_SWITCH_POWER_ON: Power state is ON */
	DRM_SWITCH_POWER_ON = 0,

	/** @DRM_SWITCH_POWER_OFF: Power state is OFF */
	DRM_SWITCH_POWER_OFF = 1,

	/** @DRM_SWITCH_POWER_CHANGING: Power state is changing */
	DRM_SWITCH_POWER_CHANGING = 2,

	/** @DRM_SWITCH_POWER_DYNAMIC_OFF: Suspended */
	DRM_SWITCH_POWER_DYNAMIC_OFF = 3,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
/**
 * struct drm_device - DRM device structure
 *
 * This structure represents a complete card that
 * may contain multiple heads.
 */
struct drm_device {
	/**
	 * @legacy_dev_list:
	 *
	 * List of devices per driver for stealth attach cleanup
	 */
	struct list_head legacy_dev_list;

	/** @if_version: Highest interface version set */
	int if_version;

	/** @ref: Object ref-count */
	struct kref ref;

	/** @dev: Device structure of bus-device */
	struct device *dev;

	/**
	 * @managed:
	 *
	 * Managed resources linked to the lifetime of this &drm_device as
	 * tracked by @ref.
	 */
	struct {
		/** @managed.resources: managed resources list */
		struct list_head resources;
		/** @managed.final_kfree: pointer for final kfree() call */
		void *final_kfree;
		/** @managed.lock: protects @managed.resources */
		spinlock_t lock;
	} managed;

	/** @driver: DRM driver managing the device */
	struct drm_driver *driver;

	/**
	 * @dev_private:
	 *
	 * DRM driver private data. This is deprecated and should be left set to
	 * NULL.
	 *
	 * Instead of using this pointer it is recommended that drivers use
	 * devm_drm_dev_alloc() and embed struct &drm_device in their larger
	 * per-device structure.
	 */
	void *dev_private;

	/** @primary: Primary node */
	struct drm_minor *primary;

	/** @render: Render node */
	struct drm_minor *render;

	/**
	 * @registered:
	 *
	 * Internally used by drm_dev_register() and drm_connector_register().
	 */
	bool registered;

	/**
	 * @master:
	 *
	 * Currently active master for this device.
	 * Protected by &master_mutex
	 */
	struct drm_master *master;

	/**
	 * @driver_features: per-device driver features
	 *
	 * Drivers can clear specific flags here to disallow
	 * certain features on a per-device basis while still
	 * sharing a single &struct drm_driver instance across
	 * all devices.
	 */
	u32 driver_features;

	/**
	 * @unplugged:
	 *
	 * Flag to tell if the device has been unplugged.
	 * See drm_dev_enter() and drm_dev_is_unplugged().
	 */
	bool unplugged;

	/** @anon_inode: inode for private address-space */
	struct inode *anon_inode;

	/** @unique: Unique name of the device */
	char *unique;

	/**
	 * @struct_mutex:
	 *
	 * Lock for others (not &drm_minor.master and &drm_file.is_master)
	 *
	 * WARNING:
	 * Only drivers annotated with DRIVER_LEGACY should be using this.
	 */
	struct mutex struct_mutex;

	/**
	 * @master_mutex:
	 *
	 * Lock for &drm_minor.master and &drm_file.is_master
	 */
	struct mutex master_mutex;

	/**
	 * @open_count:
	 *
	 * Usage counter for outstanding files open,
	 * protected by drm_global_mutex
	 */
	atomic_t open_count;

	/** @filelist_mutex: Protects @filelist. */
	struct mutex filelist_mutex;
	/**
	 * @filelist:
	 *
	 * List of userspace clients, linked through &drm_file.lhead.
	 */
	struct list_head filelist;

	/**
	 * @filelist_internal:
	 *
	 * List of open DRM files for in-kernel clients.
	 * Protected by &filelist_mutex.
	 */
	struct list_head filelist_internal;

	/**
	 * @clientlist_mutex:
	 *
	 * Protects &clientlist access.
	 */
	struct mutex clientlist_mutex;

	/**
	 * @clientlist:
	 *
	 * List of in-kernel clients. Protected by &clientlist_mutex.
	 */
	struct list_head clientlist;

	/**
	 * @irq_enabled:
	 *
	 * Indicates that interrupt handling is enabled, specifically vblank
	 * handling. Drivers which don't use drm_irq_install() need to set this
	 * to true manually.
	 */
	bool irq_enabled;

	/**
	 * @irq: Used by the drm_irq_install() and drm_irq_uninstall() helpers.
	 */
	int irq;

	/**
	 * @vblank_disable_immediate:
	 *
	 * If true, vblank interrupt will be disabled immediately when the
	 * refcount drops to zero, as opposed to via the vblank disable
	 * timer.
	 *
	 * This can be set to true if the hardware has a working vblank counter
	 * with high-precision timestamping (otherwise there are races) and the
	 * driver uses drm_crtc_vblank_on() and drm_crtc_vblank_off()
	 * appropriately. See also @max_vblank_count and
	 * &drm_crtc_funcs.get_vblank_counter.
	 */
	bool vblank_disable_immediate;

	/**
	 * @vblank:
	 *
	 * Array of vblank tracking structures, one per &struct drm_crtc. For
	 * historical reasons (vblank support predates kernel modesetting) this
	 * is free-standing and not part of &struct drm_crtc itself. It must be
	 * initialized explicitly by calling drm_vblank_init().
	 */
	struct drm_vblank_crtc *vblank;

	/**
	 * @vblank_time_lock:
	 *
	 * Protects vblank count and time updates during vblank enable/disable
	 */
	spinlock_t vblank_time_lock;
	/**
	 * @vbl_lock: Top-level vblank references lock, wraps the low-level
	 * @vblank_time_lock.
	 */
	spinlock_t vbl_lock;

	/**
	 * @max_vblank_count:
	 *
	 * Maximum value of the vblank registers. This value +1 will result in a
	 * wrap-around of the vblank register. It is used by the vblank core to
	 * handle wrap-arounds.
	 *
	 * If set to zero the vblank core will try to guess the elapsed vblanks
	 * between times when the vblank interrupt is disabled through
	 * high-precision timestamps. That approach is suffering from small
	 * races and imprecision over longer time periods, hence exposing a
	 * hardware vblank counter is always recommended.
	 *
	 * This is the statically configured device wide maximum. The driver
	 * can instead choose to use a runtime configurable per-crtc value
	 * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
	 * must be left at zero. See drm_crtc_set_max_vblank_count() on how
	 * to use the per-crtc value.
	 *
	 * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
	 */
	u32 max_vblank_count;

	/** @vblank_event_list: List of vblank events */
	struct list_head vblank_event_list;

	/**
	 * @event_lock:
	 *
	 * Protects @vblank_event_list and event delivery in
	 * general. See drm_send_event() and drm_send_event_locked().
	 */
	spinlock_t event_lock;

	/** @agp: AGP data */
	struct drm_agp_head *agp;

	/** @pdev: PCI device structure */
	struct pci_dev *pdev;

#ifdef __alpha__
	/** @hose: PCI hose, only used on ALPHA platforms. */
	struct pci_controller *hose;
#endif
	/** @num_crtcs: Number of CRTCs on this device */
	unsigned int num_crtcs;

	/** @mode_config: Current mode config */
	struct drm_mode_config mode_config;

	/** @object_name_lock: GEM information */
	struct mutex object_name_lock;

	/** @object_name_idr: GEM information */
	struct idr object_name_idr;

	/** @vma_offset_manager: GEM information */
	struct drm_vma_offset_manager *vma_offset_manager;

	/** @vram_mm: VRAM MM memory manager */
	struct drm_vram_mm *vram_mm;

	/**
	 * @switch_power_state:
	 *
	 * Power state of the client.
	 * Used by drivers supporting the switcheroo driver.
	 * The state is maintained in the
	 * &vga_switcheroo_client_ops.set_gpu_state callback
	 */
	enum switch_power_state switch_power_state;

	/**
	 * @fb_helper:
	 *
	 * Pointer to the fbdev emulation structure.
	 * Set by drm_fb_helper_init() and cleared by drm_fb_helper_fini().
	 */
	struct drm_fb_helper *fb_helper;

	/* Everything below here is for legacy driver, never use! */
	/* private: */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
	/* Context handle management - linked list of context handles */
	struct list_head ctxlist;

	/* Context handle management - mutex for &ctxlist */
	struct mutex ctxlist_mutex;

	/* Context handle management */
	struct idr ctx_idr;

	/* Memory management - linked list of regions */
	struct list_head maplist;

	/* Memory management - user token hash table for maps */
	struct drm_open_hash map_hash;

	/* Context handle management - list of vmas (for debugging) */
	struct list_head vmalist;

	/* Optional pointer for DMA support */
	struct drm_device_dma *dma;

	/* Context swapping flag */
	__volatile__ long context_flag;

	/* Last current context */
	int last_context;

	/* Lock for &buf_use and a few other things. */
	spinlock_t buf_lock;

	/* Usage counter for buffers in use -- cannot alloc */
	int buf_use;

	/* Buffer allocation in progress */
	atomic_t buf_alloc;

	/* Context and hardware lock captured for signal handling (legacy) */
	struct {
		int context;
		struct drm_hw_lock *lock;
	} sigdata;

	/* Legacy AGP buffer mapping and its user-space token */
	struct drm_local_map *agp_buffer_map;
	unsigned int agp_buffer_token;

	/* Scatter gather memory */
	struct drm_sg_mem *sg;
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) #endif