/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _UAPI_VC4_DRM_H_
#define _UAPI_VC4_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VC4_SUBMIT_CL                         0x00
#define DRM_VC4_WAIT_SEQNO                        0x01
#define DRM_VC4_WAIT_BO                           0x02
#define DRM_VC4_CREATE_BO                         0x03
#define DRM_VC4_MMAP_BO                           0x04
#define DRM_VC4_CREATE_SHADER_BO                  0x05
#define DRM_VC4_GET_HANG_STATE                    0x06
#define DRM_VC4_GET_PARAM                         0x07
#define DRM_VC4_SET_TILING                        0x08
#define DRM_VC4_GET_TILING                        0x09
#define DRM_VC4_LABEL_BO                          0x0a
#define DRM_VC4_GEM_MADVISE                       0x0b
#define DRM_VC4_PERFMON_CREATE                    0x0c
#define DRM_VC4_PERFMON_DESTROY                   0x0d
#define DRM_VC4_PERFMON_GET_VALUES                0x0e

#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
#define DRM_IOCTL_VC4_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
#define DRM_IOCTL_VC4_LABEL_BO            DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_LABEL_BO, struct drm_vc4_label_bo)
#define DRM_IOCTL_VC4_GEM_MADVISE         DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GEM_MADVISE, struct drm_vc4_gem_madvise)
#define DRM_IOCTL_VC4_PERFMON_CREATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_CREATE, struct drm_vc4_perfmon_create)
#define DRM_IOCTL_VC4_PERFMON_DESTROY     DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_DESTROY, struct drm_vc4_perfmon_destroy)
#define DRM_IOCTL_VC4_PERFMON_GET_VALUES  DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_PERFMON_GET_VALUES, struct drm_vc4_perfmon_get_values)

struct drm_vc4_submit_rcl_surface {
        __u32 hindex; /* Handle index, or ~0 if not present. */
        __u32 offset; /* Offset to start of buffer. */
        /*
         * Bits for either render config (color_write) or load/store packet.
         * Bits should all be 0 for MSAA load/stores.
         */
        __u16 bits;

#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES   (1 << 0)
        __u16 flags;
};

/**
 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * Drivers typically use GPU BOs to store batchbuffers / command lists and
 * their associated state. However, because the VC4 lacks an MMU, the kernel
 * has to validate the memory accesses made by the GPU commands. If the
 * commands were stored in BOs, validating them would require expensive
 * uncached readback. Instead, userspace accumulates commands and associated
 * state in plain memory, then the kernel copies the data into its own
 * address space, validates it, and stores it in a GPU BO.
 */
struct drm_vc4_submit_cl {
        /* Pointer to the binner command list.
         *
         * This is the first set of commands executed, which runs the
         * coordinate shader to determine where primitives land on the screen,
         * then writes out the state updates and draw calls necessary per tile
         * to the tile allocation BO.
         */
        __u64 bin_cl;

        /* Pointer to the shader records.
         *
         * Shader records are the structures read by the hardware that contain
         * pointers to uniforms, shaders, and vertex attributes. The
         * reference to the shader record has enough information to determine
         * how many pointers are necessary (a fixed number for shaders/uniforms,
         * plus an attribute count), so those BO indices into bo_handles are
         * just stored as __u32s before each shader record passed in.
         */
        __u64 shader_rec;

        /* Pointer to uniform data and texture handles for the textures
         * referenced by the shader.
         *
         * For each shader state record, there is a set of uniform data in the
         * order referenced by the record (FS, VS, then CS). Each set of
         * uniform data has a __u32 index into bo_handles per texture
         * sample operation, in the order the QPU_W_TMUn_S writes appear in
         * the program. Following the texture BO handle indices is the actual
         * uniform data.
         *
         * The individual uniform state blocks don't have sizes passed in,
         * because the kernel has to determine the sizes anyway during shader
         * code validation.
         */
        __u64 uniforms;
        __u64 bo_handles;

        /* Size in bytes of the binner command list. */
        __u32 bin_cl_size;
        /* Size in bytes of the set of shader records. */
        __u32 shader_rec_size;
        /* Number of shader records.
         *
         * This could just be computed from the contents of shader_rec and
         * the address bits of references to them from the bin CL, but
         * passing it in keeps the kernel from having to resize some
         * allocations it makes.
         */
        __u32 shader_rec_count;
        /* Size in bytes of the uniform state. */
        __u32 uniforms_size;

        /* Number of BO handles passed in (size is that times 4). */
        __u32 bo_handle_count;

        /* RCL setup: */
        __u16 width;
        __u16 height;
        __u8 min_x_tile;
        __u8 min_y_tile;
        __u8 max_x_tile;
        __u8 max_y_tile;
        struct drm_vc4_submit_rcl_surface color_read;
        struct drm_vc4_submit_rcl_surface color_write;
        struct drm_vc4_submit_rcl_surface zs_read;
        struct drm_vc4_submit_rcl_surface zs_write;
        struct drm_vc4_submit_rcl_surface msaa_color_write;
        struct drm_vc4_submit_rcl_surface msaa_zs_write;
        __u32 clear_color[2];
        __u32 clear_z;
        __u8 clear_s;

        __u32 pad:24;

#define VC4_SUBMIT_CL_USE_CLEAR_COLOR             (1 << 0)
        /* By default, the kernel gets to choose the order that the tiles are
         * rendered in. If this is set, then the tiles will be rendered in a
         * raster order, with the right-to-left vs left-to-right and
         * top-to-bottom vs bottom-to-top dictated by
         * VC4_SUBMIT_CL_RCL_ORDER_INCREASING_*. This allows overlapping
         * blits to be implemented using the 3D engine.
         */
#define VC4_SUBMIT_CL_FIXED_RCL_ORDER             (1 << 1)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X      (1 << 2)
#define VC4_SUBMIT_CL_RCL_ORDER_INCREASING_Y      (1 << 3)
        __u32 flags;

        /* Returned value of the seqno of this render job (for the
         * wait ioctl).
         */
        __u64 seqno;

        /* ID of the perfmon to attach to this job. 0 means no perfmon. */
        __u32 perfmonid;

        /* Syncobj handle to wait on. If set, processing of this render job
         * will not start until the syncobj is signaled. 0 means ignore.
         */
        __u32 in_sync;

        /* Syncobj handle to export fence to. If set, the fence in the syncobj
         * will be replaced with a fence that signals upon completion of this
         * render job. 0 means ignore.
         */
        __u32 out_sync;

        __u32 pad2;
};
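
/*
 * A minimal submission sketch (illustrative only, not part of the UAPI).
 * It assumes "fd" is an open vc4 DRM device node, that the bin CL, shader
 * records, uniforms, and handle array were built in plain userspace memory
 * as described above, and that <sys/ioctl.h> and <stdint.h> are included;
 * error handling is elided.
 *
 *      struct drm_vc4_submit_cl submit = {
 *              .bin_cl = (__u64)(uintptr_t)bin_cl,
 *              .bin_cl_size = bin_cl_size,
 *              .shader_rec = (__u64)(uintptr_t)shader_rec,
 *              .shader_rec_size = shader_rec_size,
 *              .shader_rec_count = shader_rec_count,
 *              .uniforms = (__u64)(uintptr_t)uniforms,
 *              .uniforms_size = uniforms_size,
 *              .bo_handles = (__u64)(uintptr_t)bo_handles,
 *              .bo_handle_count = bo_handle_count,
 *              .width = 256,
 *              .height = 256,
 *              .max_x_tile = 3,        // 256 pixels / 64x64 tiles (non-MSAA)
 *              .max_y_tile = 3,
 *      };
 *
 *      if (ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit) == 0)
 *              last_seqno = submit.seqno;      // usable with the wait ioctls
 */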

/**
 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
 *
 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
 * block, just return the status."
 */
struct drm_vc4_wait_seqno {
        __u64 seqno;
        __u64 timeout_ns;
};
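
/*
 * Usage sketch (illustrative): block for up to one second on the seqno
 * returned by a prior DRM_IOCTL_VC4_SUBMIT_CL; errors elided.
 *
 *      struct drm_vc4_wait_seqno wait = {
 *              .seqno = submit.seqno,
 *              .timeout_ns = 1000000000ull,    // 0 would just poll
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 */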

/**
 * struct drm_vc4_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_vc4_wait_bo {
        __u32 handle;
        __u32 pad;
        __u64 timeout_ns;
};
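
/*
 * Usage sketch (illustrative): wait for all rendering to a (possibly
 * shared) BO to finish, whichever process submitted it.
 *
 *      struct drm_vc4_wait_bo wait = {
 *              .handle = bo_handle,
 *              .timeout_ns = 1000000000ull,
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
 */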

/**
 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_create_bo {
        __u32 size;
        __u32 flags;
        /** Returned GEM handle for the BO. */
        __u32 handle;
        __u32 pad;
};
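
/*
 * Usage sketch (illustrative): allocate a 64 KiB BO; errors elided.
 *
 *      struct drm_vc4_create_bo create = {
 *              .size = 64 * 1024,
 *              .flags = 0,             // no flags are defined yet
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *      // create.handle now names the new BO
 */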

/**
 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
 *
 * This doesn't actually perform an mmap. Instead, it returns the
 * offset you need to use in an mmap on the DRM device node. This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_mmap_bo {
        /** Handle for the object being mapped. */
        __u32 handle;
        __u32 flags;
        /** offset into the drm node to use for subsequent mmap call. */
        __u64 offset;
};
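
/*
 * Usage sketch (illustrative) of the two-step mapping described above,
 * reusing "create" from the previous sketch: the ioctl only computes a
 * fake offset, and the actual mapping is an ordinary mmap(2) on the DRM
 * fd.  Assumes <sys/mman.h>; errors elided.
 *
 *      struct drm_vc4_mmap_bo map = {
 *              .handle = create.handle,
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *      void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map.offset);
 */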

/**
 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
 * shader BOs.
 *
 * Since allowing a shader to be overwritten while it's also being
 * executed from would allow privilege escalation, shaders must be
 * created using this ioctl, and they can't be mmapped later.
 */
struct drm_vc4_create_shader_bo {
        /* Size of the data argument. */
        __u32 size;
        /* Flags, currently must be 0. */
        __u32 flags;

        /* Pointer to the data. */
        __u64 data;

        /** Returned GEM handle for the BO. */
        __u32 handle;
        /* Pad, must be 0. */
        __u32 pad;
};
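
/*
 * Usage sketch (illustrative): upload a shader for validation.
 * "qpu_insts" is a hypothetical array of 64-bit QPU instructions built
 * by userspace; the resulting BO can be referenced from shader records
 * but never mmapped.
 *
 *      struct drm_vc4_create_shader_bo shader = {
 *              .size = sizeof(qpu_insts),
 *              .data = (__u64)(uintptr_t)qpu_insts,
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &shader);
 */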

struct drm_vc4_get_hang_state_bo {
        __u32 handle;
        __u32 paddr;
        __u32 size;
        __u32 pad;
};

/**
 * struct drm_vc4_get_hang_state - ioctl argument for collecting state
 * from a GPU hang for analysis.
 */
struct drm_vc4_get_hang_state {
        /** Pointer to array of struct drm_vc4_get_hang_state_bo. */
        __u64 bo;
        /**
         * On input, the size of the bo array. Output is the number
         * of BOs to be returned.
         */
        __u32 bo_count;

        __u32 start_bin, start_render;

        __u32 ct0ca, ct0ea;
        __u32 ct1ca, ct1ea;
        __u32 ct0cs, ct1cs;
        __u32 ct0ra0, ct1ra0;

        __u32 bpca, bpcs;
        __u32 bpoa, bpos;

        __u32 vpmbase;

        __u32 dbge;
        __u32 fdbgo;
        __u32 fdbgb;
        __u32 fdbgr;
        __u32 fdbgs;
        __u32 errstat;

        /* Pad that we may save more registers into in the future. */
        __u32 pad[16];
};
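
/*
 * Usage sketch (illustrative): the usual two-pass pattern for a
 * variable-length array.  A first call with bo_count = 0 learns the
 * number of BOs, then the array is allocated and the call repeated.
 * Assumes <stdlib.h>; errors elided.
 *
 *      struct drm_vc4_get_hang_state state = { 0 };
 *      ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &state);  // fills bo_count
 *      struct drm_vc4_get_hang_state_bo *bos =
 *              calloc(state.bo_count, sizeof(*bos));
 *      state.bo = (__u64)(uintptr_t)bos;
 *      ioctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &state);
 */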

#define DRM_VC4_PARAM_V3D_IDENT0                  0
#define DRM_VC4_PARAM_V3D_IDENT1                  1
#define DRM_VC4_PARAM_V3D_IDENT2                  2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES           3
#define DRM_VC4_PARAM_SUPPORTS_ETC1               4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS        5
#define DRM_VC4_PARAM_SUPPORTS_FIXED_RCL_ORDER    6
#define DRM_VC4_PARAM_SUPPORTS_MADVISE            7
#define DRM_VC4_PARAM_SUPPORTS_PERFMON            8

struct drm_vc4_get_param {
        __u32 param;
        __u32 pad;
        __u64 value;
};
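
/*
 * Usage sketch (illustrative): probe for a feature before relying on it.
 *
 *      struct drm_vc4_get_param param = {
 *              .param = DRM_VC4_PARAM_SUPPORTS_PERFMON,
 *      };
 *      int has_perfmon = ioctl(fd, DRM_IOCTL_VC4_GET_PARAM, &param) == 0 &&
 *                        param.value != 0;
 */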

struct drm_vc4_get_tiling {
        __u32 handle;
        __u32 flags;
        __u64 modifier;
};

struct drm_vc4_set_tiling {
        __u32 handle;
        __u32 flags;
        __u64 modifier;
};
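
/*
 * Usage sketch (illustrative): tag a BO as T-format tiled so that anyone
 * importing the buffer and calling GET_TILING sees the right layout.
 * DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED comes from drm_fourcc.h;
 * "bo_handle" is a handle from a prior CREATE_BO.
 *
 *      struct drm_vc4_set_tiling set = {
 *              .handle = bo_handle,
 *              .modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_SET_TILING, &set);
 */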

/**
 * struct drm_vc4_label_bo - Attach a name to a BO for debug purposes.
 */
struct drm_vc4_label_bo {
        __u32 handle;
        __u32 len;
        __u64 name;
};
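
/*
 * Usage sketch (illustrative): the label shows up in the kernel's BO
 * accounting (e.g. the vc4 debugfs BO stats), which helps attribute
 * memory usage while debugging.
 *
 *      static const char label[] = "scanout buffer";
 *      struct drm_vc4_label_bo lbl = {
 *              .handle = bo_handle,
 *              .len = sizeof(label) - 1,
 *              .name = (__u64)(uintptr_t)label,
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_LABEL_BO, &lbl);
 */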

/*
 * States prefixed with '__' are internal states and cannot be passed to the
 * DRM_IOCTL_VC4_GEM_MADVISE ioctl.
 */
#define VC4_MADV_WILLNEED                         0
#define VC4_MADV_DONTNEED                         1
#define __VC4_MADV_PURGED                         2
#define __VC4_MADV_NOTSUPP                        3

struct drm_vc4_gem_madvise {
        __u32 handle;
        __u32 madv;
        __u32 retained;
        __u32 pad;
};
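
/*
 * Usage sketch (illustrative): a userspace BO cache marks idle buffers
 * purgeable and re-checks them before reuse.
 *
 *      struct drm_vc4_gem_madvise madv = {
 *              .handle = cached_handle,
 *              .madv = VC4_MADV_DONTNEED,  // kernel may purge under pressure
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *
 *      // Later, before reusing the BO:
 *      madv.madv = VC4_MADV_WILLNEED;
 *      ioctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &madv);
 *      if (!madv.retained)
 *              reallocate();   // contents were purged (hypothetical helper)
 */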

enum {
        VC4_PERFCNT_FEP_VALID_PRIMS_NO_RENDER,
        VC4_PERFCNT_FEP_VALID_PRIMS_RENDER,
        VC4_PERFCNT_FEP_CLIPPED_QUADS,
        VC4_PERFCNT_FEP_VALID_QUADS,
        VC4_PERFCNT_TLB_QUADS_NOT_PASSING_STENCIL,
        VC4_PERFCNT_TLB_QUADS_NOT_PASSING_Z_AND_STENCIL,
        VC4_PERFCNT_TLB_QUADS_PASSING_Z_AND_STENCIL,
        VC4_PERFCNT_TLB_QUADS_ZERO_COVERAGE,
        VC4_PERFCNT_TLB_QUADS_NON_ZERO_COVERAGE,
        VC4_PERFCNT_TLB_QUADS_WRITTEN_TO_COLOR_BUF,
        VC4_PERFCNT_PLB_PRIMS_OUTSIDE_VIEWPORT,
        VC4_PERFCNT_PLB_PRIMS_NEED_CLIPPING,
        VC4_PERFCNT_PSE_PRIMS_REVERSED,
        VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_VERTEX_COORD_SHADING,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_FRAGMENT_SHADING,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_TMUS,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_SCOREBOARD,
        VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_WAITING_VARYINGS,
        VC4_PERFCNT_QPU_TOTAL_INST_CACHE_HIT,
        VC4_PERFCNT_QPU_TOTAL_INST_CACHE_MISS,
        VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_HIT,
        VC4_PERFCNT_QPU_TOTAL_UNIFORM_CACHE_MISS,
        VC4_PERFCNT_TMU_TOTAL_TEXT_QUADS_PROCESSED,
        VC4_PERFCNT_TMU_TOTAL_TEXT_CACHE_MISS,
        VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VDW_STALLED,
        VC4_PERFCNT_VPM_TOTAL_CLK_CYCLES_VCD_STALLED,
        VC4_PERFCNT_L2C_TOTAL_L2_CACHE_HIT,
        VC4_PERFCNT_L2C_TOTAL_L2_CACHE_MISS,
        VC4_PERFCNT_NUM_EVENTS,
};

#define DRM_VC4_MAX_PERF_COUNTERS                 16

struct drm_vc4_perfmon_create {
        __u32 id;
        __u32 ncounters;
        __u8 events[DRM_VC4_MAX_PERF_COUNTERS];
};

struct drm_vc4_perfmon_destroy {
        __u32 id;
};

/*
 * Returns the values of the performance counters tracked by this
 * perfmon (as an array of ncounters u64 values).
 *
 * No implicit synchronization is performed, so the user has to
 * guarantee that any jobs using this perfmon have already been
 * completed (probably by blocking on the seqno returned by the
 * last exec that used the perfmon).
 */
struct drm_vc4_perfmon_get_values {
        __u32 id;
        __u64 values_ptr;
};
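
/*
 * Usage sketch (illustrative): a full perfmon lifecycle measuring QPU
 * idle vs. valid-instruction cycles across one job; errors elided.
 *
 *      struct drm_vc4_perfmon_create create = {
 *              .ncounters = 2,
 *              .events = {
 *                      VC4_PERFCNT_QPU_TOTAL_IDLE_CYCLES,
 *                      VC4_PERFCNT_QPU_TOTAL_CLK_CYCLES_EXEC_VALID_INST,
 *              },
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);  // fills create.id
 *
 *      submit.perfmonid = create.id;
 *      // ... submit the job, then wait on submit.seqno ...
 *
 *      __u64 values[2];
 *      struct drm_vc4_perfmon_get_values get = {
 *              .id = create.id,
 *              .values_ptr = (__u64)(uintptr_t)values,
 *      };
 *      ioctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
 *
 *      struct drm_vc4_perfmon_destroy destroy = { .id = create.id };
 *      ioctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
 */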

#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_VC4_DRM_H_ */