^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (C) 2014 Red Hat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2014 Intel Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Permission is hereby granted, free of charge, to any person obtaining a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * copy of this software and associated documentation files (the "Software"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * to deal in the Software without restriction, including without limitation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * the rights to use, copy, modify, merge, publish, distribute, sublicense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * and/or sell copies of the Software, and to permit persons to whom the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Software is furnished to do so, subject to the following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * The above copyright notice and this permission notice shall be included in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * all copies or substantial portions of the Software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * OTHER DEALINGS IN THE SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * Authors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * Rob Clark <robdclark@gmail.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * Daniel Vetter <daniel.vetter@ffwll.ch>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/sync_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <drm/drm_atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <drm/drm_atomic_uapi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <drm/drm_bridge.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <drm/drm_debugfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <drm/drm_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <drm/drm_drv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <drm/drm_file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <drm/drm_fourcc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <drm/drm_mode.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <drm/drm_print.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <drm/drm_writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include "drm_crtc_internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include "drm_internal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) void __drm_crtc_commit_free(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct drm_crtc_commit *commit =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) container_of(kref, struct drm_crtc_commit, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) kfree(commit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) EXPORT_SYMBOL(__drm_crtc_commit_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * drm_atomic_state_default_release -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * release memory initialized by drm_atomic_state_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * @state: atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Free all the memory allocated by drm_atomic_state_init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * This should only be used by drivers which are still subclassing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * &drm_atomic_state and haven't switched to &drm_private_state yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) void drm_atomic_state_default_release(struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) kfree(state->connectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) kfree(state->crtcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) kfree(state->planes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) kfree(state->private_objs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) EXPORT_SYMBOL(drm_atomic_state_default_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * drm_atomic_state_init - init new atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * @dev: DRM device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * @state: atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * Default implementation for filling in a new atomic state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * This should only be used by drivers which are still subclassing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * &drm_atomic_state and haven't switched to &drm_private_state yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) kref_init(&state->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /* TODO legacy paths should maybe do a better job about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * setting this appropriately?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) state->allow_modeset = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) state->crtcs = kcalloc(dev->mode_config.num_crtc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) sizeof(*state->crtcs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) if (!state->crtcs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) state->planes = kcalloc(dev->mode_config.num_total_plane,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) sizeof(*state->planes), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (!state->planes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) state->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) drm_atomic_state_default_release(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) EXPORT_SYMBOL(drm_atomic_state_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * drm_atomic_state_alloc - allocate atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * @dev: DRM device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * This allocates an empty atomic state to track updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct drm_atomic_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) drm_atomic_state_alloc(struct drm_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) struct drm_mode_config *config = &dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (!config->funcs->atomic_state_alloc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) struct drm_atomic_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) state = kzalloc(sizeof(*state), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) if (!state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (drm_atomic_state_init(dev, state) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) kfree(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) return state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return config->funcs->atomic_state_alloc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) EXPORT_SYMBOL(drm_atomic_state_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * drm_atomic_state_default_clear - clear base atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * @state: atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * Default implementation for clearing atomic state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * This should only be used by drivers which are still subclassing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * &drm_atomic_state and haven't switched to &drm_private_state yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	/* Connectors use state->num_connector (the array grows on demand),
	 * unlike the CRTC/plane loops below which use the fixed device-wide
	 * counts from &drm_mode_config.
	 */
	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		state->connectors[i].old_state = NULL;
		state->connectors[i].new_state = NULL;
		/* Drop the reference taken when the connector was added to
		 * this state; done last, after the slot is fully cleared.
		 */
		drm_connector_put(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
		state->crtcs[i].old_state = NULL;
		state->crtcs[i].new_state = NULL;

		/* Release this state's reference on the per-CRTC commit,
		 * if one was attached.
		 */
		if (state->crtcs[i].commit) {
			drm_crtc_commit_put(state->crtcs[i].commit);
			state->crtcs[i].commit = NULL;
		}
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
		state->planes[i].old_state = NULL;
		state->planes[i].new_state = NULL;
	}

	/* Private-object slots are packed: every index below
	 * num_private_objs holds a valid ptr, so no NULL check here.
	 */
	for (i = 0; i < state->num_private_objs; i++) {
		struct drm_private_obj *obj = state->private_objs[i].ptr;

		obj->funcs->atomic_destroy_state(obj,
						 state->private_objs[i].state);
		state->private_objs[i].ptr = NULL;
		state->private_objs[i].state = NULL;
		state->private_objs[i].old_state = NULL;
		state->private_objs[i].new_state = NULL;
	}
	state->num_private_objs = 0;

	if (state->fake_commit) {
		drm_crtc_commit_put(state->fake_commit);
		state->fake_commit = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) * drm_atomic_state_clear - clear state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) * @state: atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) * When the w/w mutex algorithm detects a deadlock we need to back off and drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) * all locks. So someone else could sneak in and change the current modeset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * configuration. Which means that all the state assembled in @state is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * longer an atomic update to the current state, but to some arbitrary earlier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) * state. Which could break assumptions the driver's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) * &drm_mode_config_funcs.atomic_check likely relies on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * Hence we must clear all cached state and completely start over, using this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) void drm_atomic_state_clear(struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) struct drm_device *dev = state->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) struct drm_mode_config *config = &dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) if (config->funcs->atomic_state_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) config->funcs->atomic_state_clear(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) drm_atomic_state_default_clear(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) EXPORT_SYMBOL(drm_atomic_state_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) * __drm_atomic_state_free - free all memory for an atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * @ref: This atomic state to deallocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) * This frees all memory associated with an atomic state, including all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * per-object state for planes, CRTCs and connectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) void __drm_atomic_state_free(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) struct drm_mode_config *config = &state->dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) drm_atomic_state_clear(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) if (config->funcs->atomic_state_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) config->funcs->atomic_state_free(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) drm_atomic_state_default_release(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) kfree(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) EXPORT_SYMBOL(__drm_atomic_state_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * drm_atomic_get_crtc_state - get CRTC state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) * @crtc: CRTC to get state object for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) * This function returns the CRTC state for the given CRTC, allocating it if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) * needed. It will also grab the relevant CRTC lock to make sure that the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * is consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) * Either the allocated state or the error code encoded into the pointer. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * entire atomic sequence must be restarted. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) struct drm_crtc_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) drm_atomic_get_crtc_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) struct drm_crtc *crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) int ret, index = drm_crtc_index(crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) struct drm_crtc_state *crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) WARN_ON(!state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) if (crtc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) if (!crtc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) state->crtcs[index].state = crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) state->crtcs[index].old_state = crtc->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) state->crtcs[index].new_state = crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) state->crtcs[index].ptr = crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) crtc_state->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) crtc->base.id, crtc->name, crtc_state, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) return crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) EXPORT_SYMBOL(drm_atomic_get_crtc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) const struct drm_crtc_state *new_crtc_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) struct drm_crtc *crtc = new_crtc_state->crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) /* NOTE: we explicitly don't enforce constraints such as primary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * layer covering entire screen, since that is something we want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * to allow (on hw that supports it). For hw that does not, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) * should be checked in driver's crtc->atomic_check() vfunc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * TODO: Add generic modeset state checks once we support those.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (new_crtc_state->active && !new_crtc_state->enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) crtc->base.id, crtc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /* The state->enable vs. state->mode_blob checks can be WARN_ON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) * as this is a kernel-internal detail that userspace should never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * be able to trigger. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) crtc->base.id, crtc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) crtc->base.id, crtc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * Reject event generation for when a CRTC is off and stays off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) * It wouldn't be hard to implement this, but userspace has a track
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * record of happily burning through 100% cpu (or worse, crash) when the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * display pipe is suspended. To avoid all that fun just reject updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) * that ask for events since likely that indicates a bug in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) * compositor's drawing loop. This is consistent with the vblank IOCTL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * and legacy page_flip IOCTL which also reject service on a disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * pipe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (new_crtc_state->event &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) !new_crtc_state->active && !old_crtc_state->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) crtc->base.id, crtc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
/*
 * Dump a CRTC state into @p (used by the atomic state debugfs/printing
 * machinery). After the core fields, the driver's optional
 * &drm_crtc_funcs.atomic_print_state hook prints driver-private state.
 */
static void drm_atomic_crtc_print_state(struct drm_printer *p,
		const struct drm_crtc_state *state)
{
	struct drm_crtc *crtc = state->crtc;

	drm_printf(p, "crtc[%u]: %s\n", crtc->base.id, crtc->name);
	drm_printf(p, "\tenable=%d\n", state->enable);
	drm_printf(p, "\tactive=%d\n", state->active);
	drm_printf(p, "\tself_refresh_active=%d\n", state->self_refresh_active);
	drm_printf(p, "\tplanes_changed=%d\n", state->planes_changed);
	drm_printf(p, "\tmode_changed=%d\n", state->mode_changed);
	drm_printf(p, "\tactive_changed=%d\n", state->active_changed);
	drm_printf(p, "\tconnectors_changed=%d\n", state->connectors_changed);
	drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
	drm_printf(p, "\tplane_mask=%x\n", state->plane_mask);
	drm_printf(p, "\tconnector_mask=%x\n", state->connector_mask);
	drm_printf(p, "\tencoder_mask=%x\n", state->encoder_mask);
	drm_printf(p, "\tmode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(&state->mode));

	if (crtc->funcs->atomic_print_state)
		crtc->funcs->atomic_print_state(p, state);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) static int drm_atomic_connector_check(struct drm_connector *connector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) struct drm_connector_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct drm_crtc_state *crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) struct drm_writeback_job *writeback_job = state->writeback_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) const struct drm_display_info *info = &connector->display_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) state->max_bpc = info->bpc ? info->bpc : 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) if (connector->max_bpc_property)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) if (writeback_job->fb && !state->crtc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] framebuffer without CRTC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) connector->base.id, connector->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (state->crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) crtc_state = drm_atomic_get_existing_crtc_state(state->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) state->crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) if (writeback_job->fb && !crtc_state->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] has framebuffer, but [CRTC:%d] is off\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) connector->base.id, connector->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) state->crtc->base.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (!writeback_job->fb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) if (writeback_job->out_fence) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) connector->base.id, connector->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) drm_writeback_cleanup_job(writeback_job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) state->writeback_job = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * drm_atomic_get_plane_state - get plane state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * @plane: plane to get state object for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * This function returns the plane state for the given plane, allocating it if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * needed. It will also grab the relevant plane lock to make sure that the state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * is consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * Either the allocated state or the error code encoded into the pointer. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * entire atomic sequence must be restarted. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct drm_plane_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) drm_atomic_get_plane_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) struct drm_plane *plane)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) int ret, index = drm_plane_index(plane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) struct drm_plane_state *plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) WARN_ON(!state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) /* the legacy pointers should never be set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) WARN_ON(plane->fb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) WARN_ON(plane->old_fb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) WARN_ON(plane->crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) plane_state = drm_atomic_get_existing_plane_state(state, plane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) if (plane_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) return plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) plane_state = plane->funcs->atomic_duplicate_state(plane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (!plane_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) state->planes[index].state = plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) state->planes[index].ptr = plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) state->planes[index].old_state = plane->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) state->planes[index].new_state = plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) plane_state->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) plane->base.id, plane->name, plane_state, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) if (plane_state->crtc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct drm_crtc_state *crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) crtc_state = drm_atomic_get_crtc_state(state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) plane_state->crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (IS_ERR(crtc_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) return ERR_CAST(crtc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) return plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) EXPORT_SYMBOL(drm_atomic_get_plane_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) plane_switching_crtc(const struct drm_plane_state *old_plane_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) const struct drm_plane_state *new_plane_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (!old_plane_state->crtc || !new_plane_state->crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) if (old_plane_state->crtc == new_plane_state->crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) /* This could be refined, but currently there's no helper or driver code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * to implement direct switching of active planes nor userspace to take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * advantage of more direct plane switching without the intermediate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * full OFF state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) * drm_atomic_plane_check - check plane state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * @old_plane_state: old plane state to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * @new_plane_state: new plane state to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * Provides core sanity checks for plane state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) * Zero on success, error code on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) const struct drm_plane_state *new_plane_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) struct drm_plane *plane = new_plane_state->plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) struct drm_crtc *crtc = new_plane_state->crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) const struct drm_framebuffer *fb = new_plane_state->fb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) unsigned int fb_width, fb_height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) struct drm_mode_rect *clips;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) uint32_t num_clips;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) /* either *both* CRTC and FB must be set, or neither */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) if (crtc && !fb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) plane->base.id, plane->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) } else if (fb && !crtc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) plane->base.id, plane->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) /* if disabled, we don't care about the rest of the state: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (!crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) /* Check whether this plane is usable on this CRTC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) crtc->base.id, crtc->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) plane->base.id, plane->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /* Check whether this plane supports the fb pixel format. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) ret = drm_plane_check_pixel_format(plane, fb->format->format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) fb->modifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct drm_format_name_buf format_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) plane->base.id, plane->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) drm_get_format_name(fb->format->format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) &format_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) fb->modifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) /* Give drivers some help against integer overflows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) if (new_plane_state->crtc_w > INT_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) new_plane_state->crtc_h > INT_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) plane->base.id, plane->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) new_plane_state->crtc_w, new_plane_state->crtc_h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) new_plane_state->crtc_x, new_plane_state->crtc_y);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) return -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) fb_width = fb->width << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) fb_height = fb->height << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) /* Make sure source coordinates are inside the fb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) if (new_plane_state->src_w > fb_width ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) new_plane_state->src_x > fb_width - new_plane_state->src_w ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) new_plane_state->src_h > fb_height ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) new_plane_state->src_y > fb_height - new_plane_state->src_h) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) "%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) plane->base.id, plane->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) new_plane_state->src_w >> 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) new_plane_state->src_h >> 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) new_plane_state->src_x >> 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) new_plane_state->src_y >> 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) fb->width, fb->height);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) clips = drm_plane_get_damage_clips(new_plane_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) num_clips = drm_plane_get_damage_clips_count(new_plane_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* Make sure damage clips are valid and inside the fb. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) while (num_clips > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (clips->x1 >= clips->x2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) clips->y1 >= clips->y2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) clips->x1 < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) clips->y1 < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) clips->x2 > fb_width ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) clips->y2 > fb_height) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) plane->base.id, plane->name, clips->x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) clips->y1, clips->x2, clips->y2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) clips++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) num_clips--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) if (plane_switching_crtc(old_plane_state, new_plane_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) plane->base.id, plane->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) static void drm_atomic_plane_print_state(struct drm_printer *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) const struct drm_plane_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) struct drm_plane *plane = state->plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) struct drm_rect src = drm_plane_state_src(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct drm_rect dest = drm_plane_state_dest(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) drm_printf(p, "plane[%u]: %s\n", plane->base.id, plane->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) drm_printf(p, "\tfb=%u\n", state->fb ? state->fb->base.id : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (state->fb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) drm_framebuffer_print_info(p, 2, state->fb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) drm_printf(p, "\tcrtc-pos=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&dest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) drm_printf(p, "\tsrc-pos=" DRM_RECT_FP_FMT "\n", DRM_RECT_FP_ARG(&src));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) drm_printf(p, "\trotation=%x\n", state->rotation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) drm_printf(p, "\tnormalized-zpos=%x\n", state->normalized_zpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) drm_printf(p, "\tcolor-encoding=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) drm_get_color_encoding_name(state->color_encoding));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) drm_printf(p, "\tcolor-range=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) drm_get_color_range_name(state->color_range));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (plane->funcs->atomic_print_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) plane->funcs->atomic_print_state(p, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * DOC: handling driver private state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * Very often the DRM objects exposed to userspace in the atomic modeset api
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * underlying hardware. Especially for any kind of shared resources (e.g. shared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * clocks, scaler units, bandwidth and fifo limits shared among a group of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * planes or CRTCs, and so on) it makes sense to model these as independent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * objects. Drivers then need to do similar state tracking and commit ordering for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * such private (since not exposed to userpace) objects as the atomic core and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * helpers already provide for connectors, planes and CRTCs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * To make this easier on drivers the atomic core provides some support to track
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) * driver private state objects using struct &drm_private_obj, with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * associated state struct &drm_private_state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * Similar to userspace-exposed objects, private state structures can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * acquired by calling drm_atomic_get_private_obj_state(). This also takes care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * of locking, hence drivers should not have a need to call drm_modeset_lock()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * directly. Sequence of the actual hardware state commit is not handled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * drivers might need to keep track of struct drm_crtc_commit within subclassed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * structure of &drm_private_state as necessary, e.g. similar to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * &drm_plane_state.commit. See also &drm_atomic_state.fake_commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * All private state structures contained in a &drm_atomic_state update can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * iterated using for_each_oldnew_private_obj_in_state(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * Drivers are recommended to wrap these for each type of driver private state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * least if they want to iterate over all objects of a given type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) * An earlier way to handle driver private state was by subclassing struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) * &drm_atomic_state. But since that encourages non-standard ways to implement
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) * the check/commit split atomic requires (by using e.g. "check and rollback or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * commit instead" of "duplicate state, check, then either commit or release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * duplicated state) it is deprecated in favour of using &drm_private_state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * drm_atomic_private_obj_init - initialize private object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * @dev: DRM device this object will be attached to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * @obj: private object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * @state: initial private object state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * @funcs: pointer to the struct of function pointers that identify the object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * Initialize the private object, which can be embedded into any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * driver private object that needs its own atomic state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) drm_atomic_private_obj_init(struct drm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct drm_private_obj *obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) struct drm_private_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) const struct drm_private_state_funcs *funcs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) memset(obj, 0, sizeof(*obj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) drm_modeset_lock_init(&obj->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) obj->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) obj->funcs = funcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) list_add_tail(&obj->head, &dev->mode_config.privobj_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) EXPORT_SYMBOL(drm_atomic_private_obj_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * drm_atomic_private_obj_fini - finalize private object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * @obj: private object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * Finalize the private object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) drm_atomic_private_obj_fini(struct drm_private_obj *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) list_del(&obj->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) obj->funcs->atomic_destroy_state(obj, obj->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) drm_modeset_lock_fini(&obj->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) EXPORT_SYMBOL(drm_atomic_private_obj_fini);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * drm_atomic_get_private_obj_state - get private object state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * @state: global atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * @obj: private object to get the state for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * This function returns the private object state for the given private object,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * allocating the state if needed. It will also grab the relevant private
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * object lock to make sure that the state is consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * Either the allocated state or the error code encoded into a pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) struct drm_private_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct drm_private_obj *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) int index, num_objs, i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) struct __drm_private_objs_state *arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) struct drm_private_state *obj_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) for (i = 0; i < state->num_private_objs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (obj == state->private_objs[i].ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return state->private_objs[i].state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) num_objs = state->num_private_objs + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) size = sizeof(*state->private_objs) * num_objs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) arr = krealloc(state->private_objs, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (!arr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) state->private_objs = arr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) index = state->num_private_objs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) obj_state = obj->funcs->atomic_duplicate_state(obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (!obj_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) state->private_objs[index].state = obj_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) state->private_objs[index].old_state = obj->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) state->private_objs[index].new_state = obj_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) state->private_objs[index].ptr = obj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) obj_state->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) state->num_private_objs = num_objs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) obj, obj_state, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) return obj_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * drm_atomic_get_old_private_obj_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * @obj: private_obj to grab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * This function returns the old private object state for the given private_obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * or NULL if the private_obj is not part of the global atomic state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct drm_private_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct drm_private_obj *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) for (i = 0; i < state->num_private_objs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (obj == state->private_objs[i].ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return state->private_objs[i].old_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * drm_atomic_get_new_private_obj_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * @obj: private_obj to grab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * This function returns the new private object state for the given private_obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * or NULL if the private_obj is not part of the global atomic state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) struct drm_private_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct drm_private_obj *obj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) for (i = 0; i < state->num_private_objs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (obj == state->private_objs[i].ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return state->private_objs[i].new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * drm_atomic_get_old_connector_for_encoder - Get old connector for an encoder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * @state: Atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * @encoder: The encoder to fetch the connector state for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * This function finds and returns the connector that was connected to @encoder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * as specified by the @state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * If there is no connector in @state which previously had @encoder connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * it, this function will return NULL. While this may seem like an invalid use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * case, it is sometimes useful to differentiate commits which had no prior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * connectors attached to @encoder vs ones that did (and to inspect their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * state). This is especially true in enable hooks because the pipeline has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * Returns: The old connector connected to @encoder, or NULL if the encoder is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * not connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct drm_connector *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct drm_encoder *encoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct drm_connector_state *conn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) for_each_old_connector_in_state(state, connector, conn_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (conn_state->best_encoder == encoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * drm_atomic_get_new_connector_for_encoder - Get new connector for an encoder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * @state: Atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * @encoder: The encoder to fetch the connector state for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * This function finds and returns the connector that will be connected to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * @encoder as specified by the @state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * If there is no connector in @state which will have @encoder connected to it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * this function will return NULL. While this may seem like an invalid use case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * it is sometimes useful to differentiate commits which have no connectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * attached to @encoder vs ones that do (and to inspect their state). This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * especially true in disable hooks because the pipeline will change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * Returns: The new connector connected to @encoder, or NULL if the encoder is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * not connected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct drm_connector *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) struct drm_encoder *encoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) struct drm_connector_state *conn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) for_each_new_connector_in_state(state, connector, conn_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (conn_state->best_encoder == encoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) return connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * drm_atomic_get_connector_state - get connector state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * @connector: connector to get state object for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * This function returns the connector state for the given connector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * allocating it if needed. It will also grab the relevant connector lock to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * make sure that the state is consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * Either the allocated state or the error code encoded into the pointer. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * entire atomic sequence must be restarted. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct drm_connector_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) drm_atomic_get_connector_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct drm_connector *connector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) int ret, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct drm_mode_config *config = &connector->dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct drm_connector_state *connector_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) WARN_ON(!state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) index = drm_connector_index(connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (index >= state->num_connector) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct __drm_connnectors_state *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) int alloc = max(index + 1, config->num_connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) state->connectors = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) memset(&state->connectors[state->num_connector], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) sizeof(*state->connectors) * (alloc - state->num_connector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) state->num_connector = alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (state->connectors[index].state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return state->connectors[index].state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) connector_state = connector->funcs->atomic_duplicate_state(connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (!connector_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) drm_connector_get(connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) state->connectors[index].state = connector_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) state->connectors[index].old_state = connector->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) state->connectors[index].new_state = connector_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) state->connectors[index].ptr = connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) connector_state->state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d:%s] %p state to %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) connector->base.id, connector->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) connector_state, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (connector_state->crtc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct drm_crtc_state *crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) crtc_state = drm_atomic_get_crtc_state(state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) connector_state->crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (IS_ERR(crtc_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return ERR_CAST(crtc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return connector_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) EXPORT_SYMBOL(drm_atomic_get_connector_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) static void drm_atomic_connector_print_state(struct drm_printer *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) const struct drm_connector_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) struct drm_connector *connector = state->connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) if (state->writeback_job && state->writeback_job->fb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) drm_printf(p, "\tfb=%d\n", state->writeback_job->fb->base.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) if (connector->funcs->atomic_print_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) connector->funcs->atomic_print_state(p, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * drm_atomic_get_bridge_state - get bridge state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * @bridge: bridge to get state object for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * This function returns the bridge state for the given bridge, allocating it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * if needed. It will also grab the relevant bridge lock to make sure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * state is consistent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * Either the allocated state or the error code encoded into the pointer. When
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * entire atomic sequence must be restarted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) struct drm_bridge_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) drm_atomic_get_bridge_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) struct drm_bridge *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct drm_private_state *obj_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (IS_ERR(obj_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return ERR_CAST(obj_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return drm_priv_to_bridge_state(obj_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) EXPORT_SYMBOL(drm_atomic_get_bridge_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * @bridge: bridge to grab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * This function returns the old bridge state for the given bridge, or NULL if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * the bridge is not part of the global atomic state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct drm_bridge_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct drm_bridge *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct drm_private_state *obj_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (!obj_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return drm_priv_to_bridge_state(obj_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * @state: global atomic state object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * @bridge: bridge to grab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * This function returns the new bridge state for the given bridge, or NULL if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) * the bridge is not part of the global atomic state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct drm_bridge_state *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct drm_bridge *bridge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct drm_private_state *obj_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (!obj_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return drm_priv_to_bridge_state(obj_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * @state: atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * @encoder: DRM encoder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * This function adds all bridges attached to @encoder. This is needed to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * bridge states to @state and make them available when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * &drm_bridge_funcs.atomic_enable(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * &drm_bridge_funcs.atomic_disable_post_disable() are called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * then the w/w mutex code has detected a deadlock and the entire atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * sequence must be restarted. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct drm_encoder *encoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct drm_bridge_state *bridge_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct drm_bridge *bridge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (!encoder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) encoder->base.id, encoder->name, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) drm_for_each_bridge_in_chain(encoder, bridge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /* Skip bridges that don't implement the atomic state hooks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (!bridge->funcs->atomic_duplicate_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) bridge_state = drm_atomic_get_bridge_state(state, bridge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (IS_ERR(bridge_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) return PTR_ERR(bridge_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * drm_atomic_add_affected_connectors - add connectors for CRTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * @state: atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * @crtc: DRM CRTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * This function walks the current configuration and adds all connectors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * currently using @crtc to the atomic configuration @state. Note that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * function must acquire the connection mutex. This can potentially cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) * unneeded seralization if the update is just for the planes on one CRTC. Hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * drivers and helpers should only call this when really needed (e.g. when a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * full modeset needs to happen due to some change).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * then the w/w mutex code has detected a deadlock and the entire atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * sequence must be restarted. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct drm_crtc *crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) struct drm_mode_config *config = &state->dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) struct drm_connector_state *conn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) struct drm_connector_list_iter conn_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct drm_crtc_state *crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) crtc_state = drm_atomic_get_crtc_state(state, crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (IS_ERR(crtc_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return PTR_ERR(crtc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) crtc->base.id, crtc->name, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) * Changed connectors are already in @state, so only need to look
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * at the connector_mask in crtc_state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) drm_connector_list_iter_begin(state->dev, &conn_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) drm_for_each_connector_iter(connector, &conn_iter) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) if (!(crtc_state->connector_mask & drm_connector_mask(connector)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) conn_state = drm_atomic_get_connector_state(state, connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (IS_ERR(conn_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) drm_connector_list_iter_end(&conn_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) return PTR_ERR(conn_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) drm_connector_list_iter_end(&conn_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) * drm_atomic_add_affected_planes - add planes for CRTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * @state: atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) * @crtc: DRM CRTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * This function walks the current configuration and adds all planes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * currently used by @crtc to the atomic configuration @state. This is useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * when an atomic commit also needs to check all currently enabled plane on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * to avoid special code to force-enable all planes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Since acquiring a plane state will always also acquire the w/w mutex of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * current CRTC for that plane (if there is any) adding all the plane states for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * a CRTC will not reduce parallism of atomic updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * then the w/w mutex code has detected a deadlock and the entire atomic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) * sequence must be restarted. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) drm_atomic_add_affected_planes(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct drm_crtc *crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) const struct drm_crtc_state *old_crtc_state =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) drm_atomic_get_old_crtc_state(state, crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct drm_plane *plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) crtc->base.id, crtc->name, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) struct drm_plane_state *plane_state =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) drm_atomic_get_plane_state(state, plane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (IS_ERR(plane_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return PTR_ERR(plane_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) EXPORT_SYMBOL(drm_atomic_add_affected_planes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * drm_atomic_check_only - check whether a given config would work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * @state: atomic configuration to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * Note that this function can return -EDEADLK if the driver needed to acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * more locks but encountered a deadlock. The caller must then do the usual w/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * backoff dance and restart. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * 0 on success, negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) int drm_atomic_check_only(struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) struct drm_device *dev = state->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) struct drm_mode_config *config = &dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) struct drm_plane *plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) struct drm_plane_state *old_plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct drm_plane_state *new_plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct drm_crtc *crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct drm_crtc_state *old_crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) struct drm_crtc_state *new_crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct drm_connector *conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct drm_connector_state *conn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) DRM_DEBUG_ATOMIC("checking %p\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) plane->base.id, plane->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) crtc->base.id, crtc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) for_each_new_connector_in_state(state, conn, conn_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) ret = drm_atomic_connector_check(conn, conn_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] atomic core check failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) conn->base.id, conn->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (config->funcs->atomic_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) ret = config->funcs->atomic_check(state->dev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) state, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) if (!state->allow_modeset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) crtc->base.id, crtc->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) EXPORT_SYMBOL(drm_atomic_check_only);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) * drm_atomic_commit - commit configuration atomically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * @state: atomic configuration to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * Note that this function can return -EDEADLK if the driver needed to acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * more locks but encountered a deadlock. The caller must then do the usual w/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * backoff dance and restart. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * This function will take its own reference on @state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * Callers should always release their reference with drm_atomic_state_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * 0 on success, negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) int drm_atomic_commit(struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct drm_mode_config *config = &state->dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = drm_atomic_check_only(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) DRM_DEBUG_ATOMIC("committing %p\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) return config->funcs->atomic_commit(state->dev, state, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) EXPORT_SYMBOL(drm_atomic_commit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * drm_atomic_nonblocking_commit - atomic nonblocking commit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * @state: atomic configuration to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * Note that this function can return -EDEADLK if the driver needed to acquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * more locks but encountered a deadlock. The caller must then do the usual w/w
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * backoff dance and restart. All other errors are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * This function will take its own reference on @state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) * Callers should always release their reference with drm_atomic_state_put().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) * 0 on success, negative error code on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) struct drm_mode_config *config = &state->dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ret = drm_atomic_check_only(state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return config->funcs->atomic_commit(state->dev, state, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /* just used from drm-client and atomic-helper: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct drm_plane_state *plane_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) drm_atomic_set_fb_for_plane(plane_state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) plane_state->crtc_x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) plane_state->crtc_y = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) plane_state->crtc_w = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) plane_state->crtc_h = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) plane_state->src_x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) plane_state->src_y = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) plane_state->src_w = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) plane_state->src_h = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) EXPORT_SYMBOL(__drm_atomic_helper_disable_plane);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static int update_output_state(struct drm_atomic_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) struct drm_mode_set *set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) struct drm_device *dev = set->crtc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) struct drm_crtc *crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) struct drm_crtc_state *new_crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct drm_connector_state *new_conn_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) state->acquire_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /* First disable all connectors on the target crtc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) ret = drm_atomic_add_affected_connectors(state, set->crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) for_each_new_connector_in_state(state, connector, new_conn_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (new_conn_state->crtc == set->crtc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ret = drm_atomic_set_crtc_for_connector(new_conn_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) /* Make sure legacy setCrtc always re-trains */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) new_conn_state->link_status = DRM_LINK_STATUS_GOOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /* Then set all connectors from set->connectors on the target crtc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) for (i = 0; i < set->num_connectors; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) new_conn_state = drm_atomic_get_connector_state(state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) set->connectors[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (IS_ERR(new_conn_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return PTR_ERR(new_conn_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ret = drm_atomic_set_crtc_for_connector(new_conn_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) set->crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * Don't update ->enable for the CRTC in the set_config request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * since a mismatch would indicate a bug in the upper layers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * The actual modeset code later on will catch any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * inconsistencies here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (crtc == set->crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (!new_crtc_state->connector_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ret = drm_atomic_set_mode_prop_for_crtc(new_crtc_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) new_crtc_state->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* just used from drm-client and atomic-helper: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) int __drm_atomic_helper_set_config(struct drm_mode_set *set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) struct drm_crtc_state *crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct drm_plane_state *primary_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) struct drm_crtc *crtc = set->crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) int hdisplay, vdisplay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) crtc_state = drm_atomic_get_crtc_state(state, crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (IS_ERR(crtc_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return PTR_ERR(crtc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) primary_state = drm_atomic_get_plane_state(state, crtc->primary);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (IS_ERR(primary_state))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return PTR_ERR(primary_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (!set->mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) WARN_ON(set->fb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) WARN_ON(set->num_connectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) crtc_state->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) drm_atomic_set_fb_for_plane(primary_state, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) goto commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) WARN_ON(!set->fb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) WARN_ON(!set->num_connectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) ret = drm_atomic_set_mode_for_crtc(crtc_state, set->mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) crtc_state->active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) drm_mode_get_hv_timing(set->mode, &hdisplay, &vdisplay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) drm_atomic_set_fb_for_plane(primary_state, set->fb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) primary_state->crtc_x = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) primary_state->crtc_y = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) primary_state->crtc_w = hdisplay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) primary_state->crtc_h = vdisplay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) primary_state->src_x = set->x << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) primary_state->src_y = set->y << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (drm_rotation_90_or_270(primary_state->rotation)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) primary_state->src_w = vdisplay << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) primary_state->src_h = hdisplay << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) primary_state->src_w = hdisplay << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) primary_state->src_h = vdisplay << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) ret = update_output_state(state, set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) EXPORT_SYMBOL(__drm_atomic_helper_set_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) void drm_atomic_print_state(const struct drm_atomic_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) struct drm_printer p = drm_info_printer(state->dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) struct drm_plane *plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct drm_plane_state *plane_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct drm_crtc *crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct drm_crtc_state *crtc_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) struct drm_connector_state *connector_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) DRM_DEBUG_ATOMIC("checking %p\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) for_each_new_plane_in_state(state, plane, plane_state, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) drm_atomic_plane_print_state(&p, plane_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) for_each_new_crtc_in_state(state, crtc, crtc_state, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) drm_atomic_crtc_print_state(&p, crtc_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) for_each_new_connector_in_state(state, connector, connector_state, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) drm_atomic_connector_print_state(&p, connector_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) bool take_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) struct drm_mode_config *config = &dev->mode_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct drm_plane *plane;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) struct drm_crtc *crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct drm_connector_list_iter conn_iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (!drm_drv_uses_atomic_modeset(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) list_for_each_entry(plane, &config->plane_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (take_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) drm_modeset_lock(&plane->mutex, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) drm_atomic_plane_print_state(p, plane->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (take_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) drm_modeset_unlock(&plane->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) list_for_each_entry(crtc, &config->crtc_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (take_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) drm_modeset_lock(&crtc->mutex, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) drm_atomic_crtc_print_state(p, crtc->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (take_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) drm_modeset_unlock(&crtc->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) drm_connector_list_iter_begin(dev, &conn_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (take_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) drm_for_each_connector_iter(connector, &conn_iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) drm_atomic_connector_print_state(p, connector->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (take_locks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) drm_modeset_unlock(&dev->mode_config.connection_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) drm_connector_list_iter_end(&conn_iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) * drm_state_dump - dump entire device atomic state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * @dev: the drm device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * @p: where to print the state to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * Just for debugging. Drivers might want an option to dump state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) * to dmesg in case of error irq's. (Hint, you probably want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) * ratelimit this!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * The caller must drm_modeset_lock_all(), or if this is called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * from error irq handler, it should not be enabled by default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * (Ie. if you are debugging errors you might not care that this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * is racey. But calling this without all modeset locks held is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * not inherently safe.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) __drm_state_dump(dev, p, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) EXPORT_SYMBOL(drm_state_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) static int drm_state_info(struct seq_file *m, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) struct drm_info_node *node = (struct drm_info_node *) m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct drm_device *dev = node->minor->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) struct drm_printer p = drm_seq_file_printer(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) __drm_state_dump(dev, &p, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) /* any use in debugfs files to dump individual planes/crtc/etc? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) static const struct drm_info_list drm_atomic_debugfs_list[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {"state", drm_state_info, 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) void drm_atomic_debugfs_init(struct drm_minor *minor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) drm_debugfs_create_files(drm_atomic_debugfs_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) ARRAY_SIZE(drm_atomic_debugfs_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) minor->debugfs_root, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) #endif