/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>

#include "atom.h"
#include "r100_reg_safe.h"
#include "r100d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_reg.h"
#include "rn50_reg_safe.h"
#include "rs100d.h"
#include "rv200d.h"
#include "rv250d.h"

/* Firmware Names */
#define FIRMWARE_R100		"radeon/R100_cp.bin"
#define FIRMWARE_R200		"radeon/R200_cp.bin"
#define FIRMWARE_R300		"radeon/R300_cp.bin"
#define FIRMWARE_R420		"radeon/R420_cp.bin"
#define FIRMWARE_RS690		"radeon/RS690_cp.bin"
#define FIRMWARE_RS600		"radeon/RS600_cp.bin"
#define FIRMWARE_R520		"radeon/R520_cp.bin"

MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
MODULE_FIRMWARE(FIRMWARE_R300);
MODULE_FIRMWARE(FIRMWARE_R420);
MODULE_FIRMWARE(FIRMWARE_RS690);
MODULE_FIRMWARE(FIRMWARE_RS600);
MODULE_FIRMWARE(FIRMWARE_R520);

#include "r100_track.h"

/* This file gathers functions specific to:
 * r100, rv100, rs100, rv200, rs200, r200, rv250, rs300, rv280
 * and, in some cases, to other ASICs as well.
 */

static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
{
	if (crtc == 0)
		return RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR;
	else
		return RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR;
}

static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
{
	u32 vline1, vline2;

	/* sample the current scanline twice; if the two reads differ,
	 * the CRTC is still scanning out.
	 */
	if (crtc == 0) {
		vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	} else {
		vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
		vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
	}
	return vline1 != vline2;
}

/**
 * r100_wait_for_vblank - vblank wait asic callback.
 *
 * @rdev: radeon_device pointer
 * @crtc: crtc to wait for vblank on
 *
 * Wait for vblank on the requested crtc (r1xx-r4xx).
 */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
	unsigned i = 0;

	if (crtc >= rdev->num_crtc)
		return;

	if (crtc == 0) {
		if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
			return;
	} else {
		if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
			return;
	}

	/* depending on when we hit vblank, we may be close to active; if so,
	 * wait for another frame.
	 */
	while (r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			/* bail out if the scanline counter has stopped
			 * (e.g. the CRTC was disabled under us).
			 */
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}

	while (!r100_is_in_vblank(rdev, crtc)) {
		if (i++ % 100 == 0) {
			if (!r100_is_counter_moving(rdev, crtc))
				break;
		}
	}
}

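/*
 * Illustrative sketch (not part of this driver): callers can use this
 * helper to make a register programming sequence land in the blanking
 * period, e.g.:
 *
 *	r100_wait_for_vblank(rdev, crtc);
 *	... reprogram CRTC registers before active scanout resumes ...
 *
 * The r100_is_counter_moving() checks above keep both loops from
 * spinning forever if the CRTC stops scanning while we poll.
 */
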
/**
 * r100_page_flip - pageflip callback.
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to flip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (r1xx-r4xx).
 * During vblank we take the crtc lock, update the scanout address and
 * wait for the update_pending bit to go high; once it does, we release
 * the lock so the double buffered update can take place.
 */
void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
	int i;

	/* Lock the graphics update lock */
	/* update the scanout addresses */
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);

	/* Wait for update_pending to go high. */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
			break;
		udelay(1);
	}
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
}

/**
 * r100_page_flip_pending - check if page flip is still pending
 *
 * @rdev: radeon_device pointer
 * @crtc_id: crtc to check
 *
 * Check if the last pageflip is still pending (r1xx-r4xx).
 * Returns the current update pending status.
 */
bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];

	/* Return current update_pending status: */
	return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
		  RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
}

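/*
 * Illustrative sketch (not part of this driver): a caller that programs
 * a flip with r100_page_flip() and then needs to know when the new base
 * address has actually latched could poll r100_page_flip_pending(),
 * bounded by the usual register timeout:
 *
 *	int i;
 *
 *	r100_page_flip(rdev, crtc_id, crtc_base, false);
 *	for (i = 0; i < rdev->usec_timeout; i++) {
 *		if (!r100_page_flip_pending(rdev, crtc_id))
 *			break;
 *		udelay(1);
 *	}
 *
 * In the real driver the radeon core instead checks this from its
 * vblank handling and completes the flip event from there.
 */
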
/**
 * r100_pm_get_dynpm_state - look up dynpm power state callback.
 *
 * @rdev: radeon_device pointer
 *
 * Look up the optimal power state based on the
 * current state of the GPU (r1xx-r5xx).
 * Used for dynpm only.
 */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	switch (rdev->pm.dynpm_planned_action) {
	case DYNPM_ACTION_MINIMUM:
		rdev->pm.requested_power_state_index = 0;
		rdev->pm.dynpm_can_downclock = false;
		break;
	case DYNPM_ACTION_DOWNCLOCK:
		if (rdev->pm.current_power_state_index == 0) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_downclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = 0; i < rdev->pm.num_power_states; i++) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i >= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index - 1;
		}
		/* don't use the power state if crtcs are active and no display flag is set */
		if ((rdev->pm.active_crtc_count > 0) &&
		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
		     RADEON_PM_MODE_NO_DISPLAY)) {
			rdev->pm.requested_power_state_index++;
		}
		break;
	case DYNPM_ACTION_UPCLOCK:
		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
			rdev->pm.dynpm_can_upclock = false;
		} else {
			if (rdev->pm.active_crtc_count > 1) {
				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
						continue;
					else if (i <= rdev->pm.current_power_state_index) {
						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
						break;
					} else {
						rdev->pm.requested_power_state_index = i;
						break;
					}
				}
			} else
				rdev->pm.requested_power_state_index =
					rdev->pm.current_power_state_index + 1;
		}
		break;
	case DYNPM_ACTION_DEFAULT:
		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
		rdev->pm.dynpm_can_upclock = false;
		break;
	case DYNPM_ACTION_NONE:
	default:
		DRM_ERROR("Requested mode for undefined dynpm action\n");
		return;
	}
	/* only one clock mode per power state */
	rdev->pm.requested_clock_mode_index = 0;

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

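/*
 * Worked example for the selection logic above (hypothetical numbers):
 * assume three power states, 0 = lowest, 1 = mid, 2 = default/highest,
 * one active crtc and current_power_state_index == 1.
 * DYNPM_ACTION_DOWNCLOCK then requests index 0 (bumped back up if that
 * state is flagged RADEON_PM_MODE_NO_DISPLAY while a crtc is active),
 * DYNPM_ACTION_UPCLOCK requests index 2, and DYNPM_ACTION_MINIMUM
 * forces index 0 and clears dynpm_can_downclock.  With more than one
 * active crtc the loops additionally skip any state flagged
 * RADEON_PM_STATE_SINGLE_DISPLAY_ONLY.
 */
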
/**
 * r100_pm_init_profile - Initialize power profiles callback.
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the power states used in profile mode
 * (r1xx-r3xx).
 * Used for profile mode only.
 */
void r100_pm_init_profile(struct radeon_device *rdev)
{
	/* default */
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
	/* low sh */
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
	/* mid sh */
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
	/* high sh */
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
	/* low mh */
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
	/* mid mh */
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
	/* high mh */
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

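/*
 * Note on the table above: the *_ps_idx fields select a power state
 * index (0 is the lowest state, default_power_state_index the boot-up
 * state) for the dpms-off and dpms-on cases of each profile, and the
 * *_cm_idx fields select the clock mode within that state (always 0
 * here, since these power states carry a single clock mode).
 */
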
/**
 * r100_pm_misc - set additional pm hw parameters callback.
 *
 * @rdev: radeon_device pointer
 *
 * Set non-clock parameters associated with a power state
 * (voltage, pcie lanes, etc.) (r1xx-r4xx).
 */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->pm.set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev, ps->pcie_lanes);
		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
	}
}

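/*
 * Note on the GPIO voltage logic at the top of r100_pm_misc(): when the
 * requested state advertises ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT the
 * drop GPIO is asserted (bit set when active_high, cleared otherwise);
 * for any other state it is deasserted.  In both cases voltage->delay
 * microseconds are allowed for the rail to settle before the new
 * clocks are programmed.
 */
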
/**
 * r100_pm_prepare - pre-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Prepare for a power state change (r1xx-r4xx).
 */
void r100_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_pm_finish - post-power state change callback.
 *
 * @rdev: radeon_device pointer
 *
 * Clean up after a power state change (r1xx-r4xx).
 */
void r100_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			if (radeon_crtc->crtc_id) {
				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
			} else {
				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
			}
		}
	}
}

/**
 * r100_gui_idle - gui idle callback.
 *
 * @rdev: radeon_device pointer
 *
 * Check if the GUI (2D/3D engines) is idle (r1xx-r5xx).
 * Returns true if idle, false if not.
 */
bool r100_gui_idle(struct radeon_device *rdev)
{
	return !(RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE);
}

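/*
 * Illustrative sketch (not part of this driver): r100_gui_idle() is a
 * pure status check, so a caller that needs to wait for the 2D/3D
 * engines would poll it with a bounded loop, e.g.:
 *
 *	int i;
 *
 *	for (i = 0; i < rdev->usec_timeout; i++) {
 *		if (r100_gui_idle(rdev))
 *			break;
 *		udelay(1);
 *	}
 */
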
/* hpd for digital panel detect/disconnect */
/**
 * r100_hpd_sense - hpd sense callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (r1xx-r4xx).
 * Returns true if connected, false if not connected.
 */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
			connected = true;
		break;
	default:
		break;
	}
	return connected;
}

/**
 * r100_hpd_set_polarity - hpd set polarity callback.
 *
 * @rdev: radeon_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (r1xx-r4xx).
 * The interrupt polarity is set opposite to the current sense so that
 * the next hpd interrupt fires on a connection state change.
 */
void r100_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r100_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(RADEON_FP_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP_DETECT_INT_POL;
		else
			tmp |= RADEON_FP_DETECT_INT_POL;
		WREG32(RADEON_FP_GEN_CNTL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(RADEON_FP2_GEN_CNTL);
		if (connected)
			tmp &= ~RADEON_FP2_DETECT_INT_POL;
		else
			tmp |= RADEON_FP2_DETECT_INT_POL;
		WREG32(RADEON_FP2_GEN_CNTL, tmp);
		break;
	default:
		break;
	}
}

/**
 * r100_hpd_init - hpd setup callback.
 *
 * @rdev: radeon_device pointer
 *
 * Setup the hpd pins used by the card (r1xx-r4xx).
 * Set the polarity, and enable the hpd interrupts.
 */
void r100_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

/**
 * r100_hpd_fini - hpd tear down callback.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the hpd pins used by the card (r1xx-r4xx).
 * Disable the hpd interrupts.
 */
void r100_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
			disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * PCI GART
 */
void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
{
	/* TODO: can we do something here? */
	/* The hw seems to cache only one entry, so we should discard it;
	 * otherwise, if the first GPU GART read hit that entry, it could
	 * end up at the wrong address.
	 */
}

int r100_pci_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.ptr) {
		WARN(1, "R100 PCI GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	/* one 32-bit entry per GPU page (see r100_pci_gart_set_page()) */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
	return radeon_gart_table_ram_alloc(rdev);
}

int r100_pci_gart_enable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp);
	/* set address range for PCI address translate */
	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
	WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
	/* set PCI GART page-table base address */
	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
	WREG32(RADEON_AIC_CNTL, tmp);
	r100_pci_gart_tlb_flush(rdev);
	DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

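/*
 * Address translation example (hypothetical values): with gtt_start =
 * 0xA0000000 and 4 KiB GPU pages, a GPU access to 0xA0003000 falls in
 * the [AIC_LO_ADDR, AIC_HI_ADDR] window, so the GART looks up entry
 * (0xA0003000 - 0xA0000000) / 4096 = 3 in the table at AIC_PT_BASE and
 * redirects the request to the bus address stored there (see
 * r100_pci_gart_set_page() below).  Accesses outside the window are
 * discarded because RADEON_DIS_OUT_OF_PCI_GART_ACCESS is set above.
 */
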
void r100_pci_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* discard memory requests outside of the configured range */
	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
	WREG32(RADEON_AIC_LO_ADDR, 0);
	WREG32(RADEON_AIC_HI_ADDR, 0);
}

uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	return addr;
}

void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
			    uint64_t entry)
{
	u32 *gtt = rdev->gart.ptr;

	gtt[i] = cpu_to_le32(lower_32_bits(entry));
}

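/*
 * Entry format example: each page-table entry is a single little-endian
 * u32 holding the bus address of one GPU page, which is why
 * r100_pci_gart_init() sizes the table at num_gpu_pages * 4 bytes.
 * For instance (hypothetical address), mapping page 3 to bus address
 * 0x12345000:
 *
 *	r100_pci_gart_set_page(rdev, 3, 0x12345000ULL);
 *
 * simply stores cpu_to_le32(0x12345000) at gtt[3].
 */
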
void r100_pci_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r100_pci_gart_disable(rdev);
	radeon_gart_table_ram_free(rdev);
}

int r100_irq_set(struct radeon_device *rdev)
{
	uint32_t tmp = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		WREG32(R_000040_GEN_INT_CNTL, 0);
		return -EINVAL;
	}
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		tmp |= RADEON_SW_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		tmp |= RADEON_CRTC_VBLANK_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		tmp |= RADEON_CRTC2_VBLANK_MASK;
	}
	if (rdev->irq.hpd[0]) {
		tmp |= RADEON_FP_DETECT_MASK;
	}
	if (rdev->irq.hpd[1]) {
		tmp |= RADEON_FP2_DETECT_MASK;
	}
	WREG32(RADEON_GEN_INT_CNTL, tmp);

	/* read back to post the write */
	RREG32(RADEON_GEN_INT_CNTL);

	return 0;
}

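/*
 * Illustrative sketch (not part of this driver): the bits programmed
 * above are driven by the radeon core.  Enabling the vblank interrupt
 * for crtc 0, for example, amounts to:
 *
 *	rdev->irq.crtc_vblank_int[0] = true;
 *	r100_irq_set(rdev);
 *
 * (the core does this under its irq lock), and r100_irq_process()
 * below then calls drm_handle_vblank() when the corresponding status
 * bit fires.
 */
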
void r100_irq_disable(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(R_000040_GEN_INT_CNTL, 0);
	/* Wait and acknowledge irq */
	mdelay(1);
	tmp = RREG32(R_000044_GEN_INT_STATUS);
	WREG32(R_000044_GEN_INT_STATUS, tmp);
}

static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
	uint32_t irq_mask = RADEON_SW_INT_TEST |
		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;

	if (irqs) {
		WREG32(RADEON_GEN_INT_STATUS, irqs);
	}
	return irqs & irq_mask;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) int r100_irq_process(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) uint32_t status, msi_rearm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) bool queue_hotplug = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) status = r100_irq_ack(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (rdev->shutdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) while (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) /* SW interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (status & RADEON_SW_INT_TEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /* Vertical blank interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (status & RADEON_CRTC_VBLANK_STAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) if (rdev->irq.crtc_vblank_int[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) drm_handle_vblank(rdev->ddev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) rdev->pm.vblank_sync = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) wake_up(&rdev->irq.vblank_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (atomic_read(&rdev->irq.pflip[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) radeon_crtc_handle_vblank(rdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (status & RADEON_CRTC2_VBLANK_STAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (rdev->irq.crtc_vblank_int[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) drm_handle_vblank(rdev->ddev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) rdev->pm.vblank_sync = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) wake_up(&rdev->irq.vblank_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (atomic_read(&rdev->irq.pflip[1]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) radeon_crtc_handle_vblank(rdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) if (status & RADEON_FP_DETECT_STAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) queue_hotplug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) DRM_DEBUG("HPD1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (status & RADEON_FP2_DETECT_STAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) queue_hotplug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) DRM_DEBUG("HPD2\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) status = r100_irq_ack(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (queue_hotplug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) schedule_delayed_work(&rdev->hotplug_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (rdev->msi_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) case CHIP_RS400:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) case CHIP_RS480:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) WREG32(RADEON_AIC_CNTL, msi_rearm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (crtc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return RREG32(RADEON_CRTC_CRNT_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return RREG32(RADEON_CRTC2_CRNT_FRAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * rdev: radeon device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * ring: ring buffer struct for emitting packets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) RADEON_HDP_READ_BUFFER_INVALIDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* Who ever call radeon_fence_emit should call ring_lock and ask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * for enough space (today caller are ib schedule and buffer move) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) void r100_fence_ring_emit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct radeon_fence *fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) struct radeon_ring *ring = &rdev->ring[fence->ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* We have to make sure that caches are flushed before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * CPU might read something from VRAM. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Wait until IDLE & CLEAN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) r100_ring_hdp_flush(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /* Emit fence sequence & fire IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) radeon_ring_write(ring, fence->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) radeon_ring_write(ring, RADEON_SW_INT_FIRE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) bool r100_semaphore_ring_emit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct radeon_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct radeon_semaphore *semaphore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) bool emit_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) /* Unused on older asics, since we don't have semaphores or multiple rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) uint64_t src_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) uint64_t dst_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) unsigned num_gpu_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct dma_resv *resv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct radeon_fence *fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) uint32_t cur_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) uint32_t pitch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) uint32_t stride_pixels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) unsigned ndw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) int num_loops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /* radeon limited to 16k stride */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) stride_bytes &= 0x3fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* radeon pitch is /64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) pitch = stride_bytes / 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) stride_pixels = stride_bytes / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) /* Ask for enough room for blit + flush + fence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) ndw = 64 + (10 * num_loops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) r = radeon_ring_lock(rdev, ring, ndw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) while (num_gpu_pages > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) cur_pages = num_gpu_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (cur_pages > 8191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) cur_pages = 8191;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) num_gpu_pages -= cur_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /* pages are in Y direction - height
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) page width in X direction - width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) radeon_ring_write(ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) RADEON_GMC_DST_PITCH_OFFSET_CNTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) RADEON_GMC_SRC_CLIPPING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) RADEON_GMC_DST_CLIPPING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) RADEON_GMC_BRUSH_NONE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) RADEON_GMC_SRC_DATATYPE_COLOR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) RADEON_ROP3_S |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) RADEON_DP_SRC_SOURCE_MEMORY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) RADEON_GMC_CLR_CMP_CNTL_DIS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) RADEON_GMC_WR_MSK_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) radeon_ring_write(ring, num_gpu_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) radeon_ring_write(ring, num_gpu_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) radeon_ring_write(ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) RADEON_WAIT_2D_IDLECLEAN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) RADEON_WAIT_HOST_IDLECLEAN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) RADEON_WAIT_DMA_GUI_IDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) radeon_ring_unlock_undo(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return ERR_PTR(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static int r100_cp_wait_for_idle(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) tmp = RREG32(R_000E40_RBBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) r = radeon_ring_lock(rdev, ring, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) radeon_ring_write(ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) RADEON_ISYNC_ANY2D_IDLE3D |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) RADEON_ISYNC_ANY3D_IDLE2D |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) RADEON_ISYNC_WAIT_IDLEGUI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) RADEON_ISYNC_CPSCRATCH_IDLEGUI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) /* Load the microcode for the CP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static int r100_cp_init_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) const char *fw_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) DRM_DEBUG_KMS("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) (rdev->family == CHIP_RS200)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) DRM_INFO("Loading R100 Microcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) fw_name = FIRMWARE_R100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) } else if ((rdev->family == CHIP_R200) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) (rdev->family == CHIP_RV250) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) (rdev->family == CHIP_RV280) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) (rdev->family == CHIP_RS300)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) DRM_INFO("Loading R200 Microcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) fw_name = FIRMWARE_R200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) } else if ((rdev->family == CHIP_R300) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) (rdev->family == CHIP_R350) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) (rdev->family == CHIP_RV350) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) (rdev->family == CHIP_RV380) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) (rdev->family == CHIP_RS400) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) (rdev->family == CHIP_RS480)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) DRM_INFO("Loading R300 Microcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) fw_name = FIRMWARE_R300;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) } else if ((rdev->family == CHIP_R420) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) (rdev->family == CHIP_R423) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) (rdev->family == CHIP_RV410)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) DRM_INFO("Loading R400 Microcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) fw_name = FIRMWARE_R420;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) } else if ((rdev->family == CHIP_RS690) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) (rdev->family == CHIP_RS740)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) DRM_INFO("Loading RS690/RS740 Microcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) fw_name = FIRMWARE_RS690;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) } else if (rdev->family == CHIP_RS600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) DRM_INFO("Loading RS600 Microcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) fw_name = FIRMWARE_RS600;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) } else if ((rdev->family == CHIP_RV515) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) (rdev->family == CHIP_R520) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) (rdev->family == CHIP_RV530) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) (rdev->family == CHIP_R580) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) (rdev->family == CHIP_RV560) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) (rdev->family == CHIP_RV570)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) DRM_INFO("Loading R500 Microcode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) fw_name = FIRMWARE_R520;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) pr_err("radeon_cp: Failed to load firmware \"%s\"\n", fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) } else if (rdev->me_fw->size % 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) pr_err("radeon_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) rdev->me_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) release_firmware(rdev->me_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) rdev->me_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) u32 r100_gfx_get_rptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) u32 rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) rptr = RREG32(RADEON_CP_RB_RPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) u32 r100_gfx_get_wptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return RREG32(RADEON_CP_RB_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) void r100_gfx_set_wptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) WREG32(RADEON_CP_RB_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) (void)RREG32(RADEON_CP_RB_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static void r100_cp_load_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) int i, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (r100_gui_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (rdev->me_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) size = rdev->me_fw->size / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) fw_data = (const __be32 *)&rdev->me_fw->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) WREG32(RADEON_CP_ME_RAM_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) for (i = 0; i < size; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) WREG32(RADEON_CP_ME_RAM_DATAH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) be32_to_cpup(&fw_data[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) WREG32(RADEON_CP_ME_RAM_DATAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) be32_to_cpup(&fw_data[i + 1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) unsigned rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) unsigned rb_blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) unsigned max_fetch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) unsigned pre_write_timer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) unsigned pre_write_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) unsigned indirect2_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) unsigned indirect1_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (r100_debugfs_cp_init(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) DRM_ERROR("Failed to register debugfs file for CP !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (!rdev->me_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) r = r100_cp_init_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) DRM_ERROR("Failed to load firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* Align ring size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) rb_bufsz = order_base_2(ring_size / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ring_size = (1 << (rb_bufsz + 1)) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) r100_cp_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) RADEON_CP_PACKET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /* Each time the cp read 1024 bytes (16 dword/quadword) update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * the rptr copy in system ram */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) rb_blksz = 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* cp will read 128bytes at a time (4 dwords) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) max_fetch = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) ring->align_mask = 16 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) pre_write_timer = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* Force CP_RB_WPTR write if written more than one time before the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * delay expire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) pre_write_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /* Setup the cp cache like this (cache size is 96 dwords) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) * RING 0 to 15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * INDIRECT1 16 to 79
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) * INDIRECT2 80 to 95
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * Idea being that most of the gpu cmd will be through indirect1 buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * so it gets the bigger cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) indirect2_start = 80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) indirect1_start = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* cp setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) REG_SET(RADEON_MAX_FETCH, max_fetch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) tmp |= RADEON_BUF_SWAP_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) /* Set ring address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /* Force read & write ptr to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) WREG32(RADEON_CP_RB_RPTR_WR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) ring->wptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) WREG32(RADEON_CP_RB_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /* set the wb address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) WREG32(R_00070C_CP_RB_RPTR_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) WREG32(R_000770_SCRATCH_UMSK, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) tmp |= RADEON_RB_NO_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) WREG32(R_000770_SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) WREG32(RADEON_CP_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* Set cp mode to bus mastering & enable cp*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) WREG32(RADEON_CP_CSQ_MODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) REG_SET(RADEON_INDIRECT1_START, indirect1_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /* at this point everything should be setup correctly to enable master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) pci_set_master(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) DRM_ERROR("radeon: cp isn't working (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ring->ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (!ring->rptr_save_reg /* not resuming from suspend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) && radeon_ring_supports_scratch_reg(rdev, ring)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ring->rptr_save_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) void r100_cp_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (r100_cp_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /* Disable ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) r100_cp_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) DRM_INFO("radeon: cp finalized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) void r100_cp_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* Disable ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) WREG32(RADEON_CP_CSQ_MODE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) WREG32(RADEON_CP_CSQ_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) WREG32(R_000770_SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (r100_gui_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * CS functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct radeon_cs_packet *pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) unsigned idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) unsigned reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) u32 tile_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) struct radeon_bo_list *reloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) value = radeon_get_ib_value(p, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) tmp = value & 0x003fffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) tmp += (((u32)reloc->gpu_offset) >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (reloc->tiling_flags & RADEON_TILING_MACRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) tile_flags |= RADEON_DST_TILE_MACRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (reloc->tiling_flags & RADEON_TILING_MICRO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (reg == RADEON_SRC_PITCH_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) DRM_ERROR("Cannot src blit from microtiled surface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) tile_flags |= RADEON_DST_TILE_MICRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) tmp |= tile_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct radeon_cs_packet *pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) unsigned c, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct radeon_bo_list *reloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct r100_cs_track *track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) volatile uint32_t *ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) u32 idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ib = p->ib.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) track = (struct r100_cs_track *)p->track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) c = radeon_get_ib_value(p, idx++) & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (c > 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) track->num_arrays = c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) for (i = 0; i < (c - 1); i+=2, idx+=3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) DRM_ERROR("No reloc for packet3 %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) idx_value = radeon_get_ib_value(p, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) track->arrays[i + 0].esize = idx_value >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) track->arrays[i + 0].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) track->arrays[i + 0].esize &= 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) DRM_ERROR("No reloc for packet3 %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) track->arrays[i + 1].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) track->arrays[i + 1].esize = idx_value >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) track->arrays[i + 1].esize &= 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) if (c & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) DRM_ERROR("No reloc for packet3 %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) idx_value = radeon_get_ib_value(p, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) track->arrays[i + 0].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) track->arrays[i + 0].esize = idx_value >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) track->arrays[i + 0].esize &= 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) int r100_cs_parse_packet0(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) struct radeon_cs_packet *pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) const unsigned *auth, unsigned n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) radeon_packet0_check_t check)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) unsigned reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) unsigned i, j, m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) unsigned idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) idx = pkt->idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) reg = pkt->reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* Check that register fall into register range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * determined by the number of entry (n) in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * safe register bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (pkt->one_reg_wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if ((reg >> 7) > n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (((reg + (pkt->count << 2)) >> 7) > n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) for (i = 0; i <= pkt->count; i++, idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) j = (reg >> 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) m = 1 << ((reg >> 2) & 31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) if (auth[j] & m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) r = check(p, pkt, idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) if (pkt->one_reg_wr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (!(auth[j] & m)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) reg += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * r100_cs_packet_next_vline() - parse userspace VLINE packet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * @parser: parser structure holding parsing context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * Userspace sends a special sequence for VLINE waits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * PACKET0 - VLINE_START_END + value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * PACKET0 - WAIT_UNTIL +_value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * RELOC (P3) - crtc_id in reloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * This function parses this and relocates the VLINE START END
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * and WAIT UNTIL packets to the correct crtc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * It also detects a switched off crtc and nulls out the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * wait in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct drm_crtc *crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct radeon_crtc *radeon_crtc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct radeon_cs_packet p3reloc, waitreloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) int crtc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) uint32_t header, h_idx, reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) volatile uint32_t *ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ib = p->ib.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /* parse the wait until */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) /* check its a wait until and only 1 count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (waitreloc.reg != RADEON_WAIT_UNTIL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) waitreloc.count != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) DRM_ERROR("vline wait had illegal wait until segment\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) DRM_ERROR("vline wait had illegal wait until\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /* jump over the NOP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) h_idx = p->idx - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) p->idx += waitreloc.count + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) p->idx += p3reloc.count + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) header = radeon_get_ib_value(p, h_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) crtc_id = radeon_get_ib_value(p, h_idx + 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) reg = R100_CP_PACKET0_GET_REG(header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) crtc = drm_crtc_find(p->rdev->ddev, p->filp, crtc_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (!crtc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) DRM_ERROR("cannot find crtc %d\n", crtc_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) radeon_crtc = to_radeon_crtc(crtc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) crtc_id = radeon_crtc->crtc_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) if (!crtc->enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /* if the CRTC isn't enabled - we need to nop out the wait until */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) ib[h_idx + 2] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ib[h_idx + 3] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) } else if (crtc_id == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) case AVIVO_D1MODE_VLINE_START_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) header &= ~R300_CP_PACKET0_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) header |= AVIVO_D2MODE_VLINE_START_END >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) case RADEON_CRTC_GUI_TRIG_VLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) header &= ~R300_CP_PACKET0_REG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) DRM_ERROR("unknown crtc reloc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) ib[h_idx] = header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) static int r100_get_vtx_size(uint32_t vtx_fmt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) int vtx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) vtx_size = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* ordered according to bits in spec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) vtx_size += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) vtx_size += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) vtx_size += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) vtx_size += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) vtx_size += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) vtx_size += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /* blend weight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (vtx_fmt & (0x7 << 15))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) vtx_size += (vtx_fmt >> 15) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) vtx_size += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) vtx_size += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) vtx_size++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return vtx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) static int r100_packet0_check(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct radeon_cs_packet *pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) unsigned idx, unsigned reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct radeon_bo_list *reloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct r100_cs_track *track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) volatile uint32_t *ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) int i, face;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) u32 tile_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) u32 idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) ib = p->ib.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) track = (struct r100_cs_track *)p->track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) idx_value = radeon_get_ib_value(p, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) case RADEON_CRTC_GUI_TRIG_VLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) r = r100_cs_packet_parse_vline(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) /* FIXME: only allow PACKET3 blit? easier to check for out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) * range access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) case RADEON_DST_PITCH_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) case RADEON_SRC_PITCH_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) r = r100_reloc_pitch_offset(p, pkt, idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) case RADEON_RB3D_DEPTHOFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) track->zb.robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) track->zb.offset = idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) track->zb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) ib[idx] = idx_value + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) case RADEON_RB3D_COLOROFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) track->cb[0].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) track->cb[0].offset = idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) track->cb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) ib[idx] = idx_value + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) case RADEON_PP_TXOFFSET_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) case RADEON_PP_TXOFFSET_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) case RADEON_PP_TXOFFSET_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) i = (reg - RADEON_PP_TXOFFSET_0) / 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (reloc->tiling_flags & RADEON_TILING_MACRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) tile_flags |= RADEON_TXO_MACRO_TILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (reloc->tiling_flags & RADEON_TILING_MICRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) tile_flags |= RADEON_TXO_MICRO_TILE_X2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) tmp = idx_value & ~(0x7 << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) tmp |= tile_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) ib[idx] = tmp + ((u32)reloc->gpu_offset);
		} else {
			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) track->textures[i].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) case RADEON_PP_CUBIC_OFFSET_T0_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) case RADEON_PP_CUBIC_OFFSET_T0_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) case RADEON_PP_CUBIC_OFFSET_T0_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) case RADEON_PP_CUBIC_OFFSET_T0_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) case RADEON_PP_CUBIC_OFFSET_T0_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) track->textures[0].cube_info[i].offset = idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) ib[idx] = idx_value + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) track->textures[0].cube_info[i].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) case RADEON_PP_CUBIC_OFFSET_T1_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) case RADEON_PP_CUBIC_OFFSET_T1_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) case RADEON_PP_CUBIC_OFFSET_T1_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) case RADEON_PP_CUBIC_OFFSET_T1_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) case RADEON_PP_CUBIC_OFFSET_T1_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) track->textures[1].cube_info[i].offset = idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ib[idx] = idx_value + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) track->textures[1].cube_info[i].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) case RADEON_PP_CUBIC_OFFSET_T2_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) case RADEON_PP_CUBIC_OFFSET_T2_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) case RADEON_PP_CUBIC_OFFSET_T2_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) case RADEON_PP_CUBIC_OFFSET_T2_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) case RADEON_PP_CUBIC_OFFSET_T2_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) track->textures[2].cube_info[i].offset = idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) ib[idx] = idx_value + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) track->textures[2].cube_info[i].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) case RADEON_RE_WIDTH_HEIGHT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) track->maxy = ((idx_value >> 16) & 0x7FF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) track->cb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) track->zb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) case RADEON_RB3D_COLORPITCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (reloc->tiling_flags & RADEON_TILING_MACRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) tile_flags |= RADEON_COLOR_TILE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (reloc->tiling_flags & RADEON_TILING_MICRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) tmp = idx_value & ~(0x7 << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) tmp |= tile_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) ib[idx] = tmp;
		} else {
			ib[idx] = idx_value;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) track->cb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) case RADEON_RB3D_DEPTHPITCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) track->zb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) case RADEON_RB3D_CNTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) case 9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) case 12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) track->cb[0].cpp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) case 15:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) track->cb[0].cpp = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) track->cb[0].cpp = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) DRM_ERROR("Invalid color buffer format (%d) !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) track->cb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) track->zb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) case RADEON_RB3D_ZSTENCILCNTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) switch (idx_value & 0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) track->zb.cpp = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) case 9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) case 11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) track->zb.cpp = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) track->zb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) case RADEON_RB3D_ZPASS_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) idx, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) ib[idx] = idx_value + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) case RADEON_PP_CNTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
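		/*
		 * Texture-unit enable bits appear to start at bit 4 of
		 * PP_CNTL, one bit per unit; mirror them into the tracker
		 * so only bound textures are validated at draw time.
		 */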
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) uint32_t temp = idx_value >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) for (i = 0; i < track->num_texture; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) track->textures[i].enabled = !!(temp & (1 << i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) case RADEON_SE_VF_CNTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) track->vap_vf_cntl = idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) case RADEON_SE_VTX_FMT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) track->vtx_size = r100_get_vtx_size(idx_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) case RADEON_PP_TEX_SIZE_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) case RADEON_PP_TEX_SIZE_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) case RADEON_PP_TEX_SIZE_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) case RADEON_PP_TEX_PITCH_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) case RADEON_PP_TEX_PITCH_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) case RADEON_PP_TEX_PITCH_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) track->textures[i].pitch = idx_value + 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) case RADEON_PP_TXFILTER_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) case RADEON_PP_TXFILTER_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) case RADEON_PP_TXFILTER_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) i = (reg - RADEON_PP_TXFILTER_0) / 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) >> RADEON_MAX_MIP_LEVEL_SHIFT);
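		/*
		 * The 3-bit filter fields at bits [25:23] and [29:27]:
		 * encodings 2 and 6 appear to be the filters that do not
		 * require the dimension rounded up to a power of two.
		 */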
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) tmp = (idx_value >> 23) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (tmp == 2 || tmp == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) track->textures[i].roundup_w = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) tmp = (idx_value >> 27) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (tmp == 2 || tmp == 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) track->textures[i].roundup_h = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) case RADEON_PP_TXFORMAT_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) case RADEON_PP_TXFORMAT_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) case RADEON_PP_TXFORMAT_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) i = (reg - RADEON_PP_TXFORMAT_0) / 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) track->textures[i].use_pitch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) track->textures[i].use_pitch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) track->textures[i].tex_coord_type = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) case RADEON_TXFORMAT_I8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) case RADEON_TXFORMAT_RGB332:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) case RADEON_TXFORMAT_Y8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) track->textures[i].cpp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) track->textures[i].compress_format = R100_TRACK_COMP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) case RADEON_TXFORMAT_AI88:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) case RADEON_TXFORMAT_ARGB1555:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) case RADEON_TXFORMAT_RGB565:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) case RADEON_TXFORMAT_ARGB4444:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) case RADEON_TXFORMAT_VYUY422:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) case RADEON_TXFORMAT_YVYU422:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) case RADEON_TXFORMAT_SHADOW16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) case RADEON_TXFORMAT_LDUDV655:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) case RADEON_TXFORMAT_DUDV88:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) track->textures[i].cpp = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) track->textures[i].compress_format = R100_TRACK_COMP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) case RADEON_TXFORMAT_ARGB8888:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) case RADEON_TXFORMAT_RGBA8888:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) case RADEON_TXFORMAT_SHADOW32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) case RADEON_TXFORMAT_LDUDUV8888:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) track->textures[i].cpp = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) track->textures[i].compress_format = R100_TRACK_COMP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) case RADEON_TXFORMAT_DXT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) track->textures[i].cpp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) case RADEON_TXFORMAT_DXT23:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) case RADEON_TXFORMAT_DXT45:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) track->textures[i].cpp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) case RADEON_PP_CUBIC_FACES_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) case RADEON_PP_CUBIC_FACES_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) case RADEON_PP_CUBIC_FACES_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) tmp = idx_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) for (face = 0; face < 4; face++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) pr_err("Forbidden register 0x%04X in cs at %d\n", reg, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
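/*
 * Bounds-check a PACKET3 INDX_BUFFER packet: the dword at pkt->idx + 3
 * appears to hold the last byte offset the GPU will read from the index
 * buffer, so it must fall inside the bound buffer object.
 */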
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) struct radeon_cs_packet *pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) struct radeon_bo *robj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) unsigned idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) u32 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) idx = pkt->idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) value = radeon_get_ib_value(p, idx + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) if ((value + 1) > radeon_bo_size(robj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) "(need %u have %lu) !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) value + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) radeon_bo_size(robj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
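/*
 * Validate a type-3 packet.  Vertex and index buffer packets get their
 * relocations patched in; every draw opcode runs the accumulated tracker
 * state through r100_cs_track_check() before the packet is accepted.
 */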
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) static int r100_packet3_check(struct radeon_cs_parser *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) struct radeon_cs_packet *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) struct radeon_bo_list *reloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) struct r100_cs_track *track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) unsigned idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) volatile uint32_t *ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) ib = p->ib.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) idx = pkt->idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) track = (struct r100_cs_track *)p->track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) switch (pkt->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) case PACKET3_3D_LOAD_VBPNTR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) r = r100_packet3_load_vbpntr(p, pkt, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) case PACKET3_INDX_BUFFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r)
			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) case 0x23:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) r = radeon_cs_packet_next_reloc(p, &reloc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) radeon_cs_dump_packet(p, pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) track->num_arrays = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) track->arrays[0].robj = reloc->robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) track->arrays[0].esize = track->vtx_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) track->max_indx = radeon_get_ib_value(p, idx+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) track->immd_dwords = pkt->count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) r = r100_cs_track_check(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) case PACKET3_3D_DRAW_IMMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) track->immd_dwords = pkt->count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) r = r100_cs_track_check(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) /* triggers drawing using in-packet vertex data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) case PACKET3_3D_DRAW_IMMD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) track->vap_vf_cntl = radeon_get_ib_value(p, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) track->immd_dwords = pkt->count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) r = r100_cs_track_check(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) break;
	/* triggers drawing of vertex buffers setup elsewhere */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) case PACKET3_3D_DRAW_VBUF_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) track->vap_vf_cntl = radeon_get_ib_value(p, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) r = r100_cs_track_check(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) break;
	/* triggers drawing using indices to vertex buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) case PACKET3_3D_DRAW_INDX_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) track->vap_vf_cntl = radeon_get_ib_value(p, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) r = r100_cs_track_check(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) break;
	/* triggers drawing of vertex buffers setup elsewhere */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) case PACKET3_3D_DRAW_VBUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) r = r100_cs_track_check(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) break;
	/* triggers drawing using indices to vertex buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) case PACKET3_3D_DRAW_INDX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) r = r100_cs_track_check(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) break;
	case PACKET3_3D_CLEAR_HIZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) case PACKET3_3D_CLEAR_ZMASK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if (p->rdev->hyperz_filp != p->filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) case PACKET3_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
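/*
 * Top-level command-stream parser for r100-class chips.  Walks the IB
 * packet by packet: type-0 register writes go through the per-register
 * checkers (the r200 variant on CHIP_R200 and newer), type-2 packets are
 * padding, and type-3 packets go to r100_packet3_check().  The tracker
 * is attached to the parser (p->track), so it is presumably freed by the
 * generic parser teardown rather than here.
 */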
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) int r100_cs_parse(struct radeon_cs_parser *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) struct radeon_cs_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) struct r100_cs_track *track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) track = kzalloc(sizeof(*track), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (!track)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) r100_cs_track_clear(p->rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) p->track = track;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) p->idx += pkt.count + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) switch (pkt.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) case RADEON_PACKET_TYPE0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) if (p->rdev->family >= CHIP_R200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) r = r100_cs_parse_packet0(p, &pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) p->rdev->config.r100.reg_safe_bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) p->rdev->config.r100.reg_safe_bm_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) &r200_packet0_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) r = r100_cs_parse_packet0(p, &pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) p->rdev->config.r100.reg_safe_bm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) p->rdev->config.r100.reg_safe_bm_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) &r100_packet0_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) case RADEON_PACKET_TYPE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) case RADEON_PACKET_TYPE3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) r = r100_packet3_check(p, &pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) DRM_ERROR("Unknown packet type %d !\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) pkt.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) } while (p->idx < p->chunk_ib->length_dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
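/* Dump the tracked state of one texture unit when a check fails. */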
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) DRM_ERROR("pitch %d\n", t->pitch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) DRM_ERROR("use_pitch %d\n", t->use_pitch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) DRM_ERROR("width %d\n", t->width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) DRM_ERROR("width_11 %d\n", t->width_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) DRM_ERROR("height %d\n", t->height);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) DRM_ERROR("height_11 %d\n", t->height_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) DRM_ERROR("num levels %d\n", t->num_levels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) DRM_ERROR("depth %d\n", t->txdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) DRM_ERROR("bpp %d\n", t->cpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) DRM_ERROR("compress format %d\n", t->compress_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
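/*
 * Byte size of one compressed mip level: DXT formats store 4x4 texel
 * blocks, 8 bytes per block for DXT1 and 16 bytes for DXT3/5.  Narrow
 * levels appear to be padded to a minimum row width of 4 DXT1 blocks or
 * 2 DXT3/5 blocks.
 */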
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) static int r100_track_compress_size(int compress_format, int w, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) int block_width, block_height, block_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) int wblocks, hblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) int min_wblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) int sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) block_width = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) block_height = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) switch (compress_format) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) case R100_TRACK_COMP_DXT1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) block_bytes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) min_wblocks = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) case R100_TRACK_COMP_DXT35:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) block_bytes = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) min_wblocks = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) hblocks = (h + block_height - 1) / block_height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) wblocks = (w + block_width - 1) / block_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (wblocks < min_wblocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) wblocks = min_wblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) sz = wblocks * hblocks * block_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
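/*
 * Bounds-check the cube map faces that carry their own buffer objects.
 * Only cube_info[0..4] are walked here; the remaining face is presumably
 * covered by the base texture check in the caller.
 */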
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) static int r100_cs_track_cube(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) struct r100_cs_track *track, unsigned idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) unsigned face, w, h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) struct radeon_bo *cube_robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) unsigned compress_format = track->textures[idx].compress_format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) for (face = 0; face < 5; face++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) cube_robj = track->textures[idx].cube_info[face].robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) w = track->textures[idx].cube_info[face].width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) h = track->textures[idx].cube_info[face].height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
		if (compress_format) {
			size = r100_track_compress_size(compress_format, w, h);
		} else {
			size = w * h;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) size *= track->textures[idx].cpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) size += track->textures[idx].cube_info[face].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (size > radeon_bo_size(cube_robj)) {
			DRM_ERROR("Cube texture face %u needs %lu bytes but BO is %lu\n",
				  face, size, radeon_bo_size(cube_robj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) r100_cs_track_texture_print(&track->textures[idx]);
			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) }
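/*
 * Verify every enabled texture unit: accumulate the size of all mip
 * levels (honoring pitch overrides, power-of-two rounding and 3D
 * depth), scale by bytes per texel and cube faces, and compare the
 * result against the size of the backing buffer object.
 */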
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) static int r100_cs_track_texture_check(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct r100_cs_track *track)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct radeon_bo *robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) unsigned u, i, w, h, d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) for (u = 0; u < track->num_texture; u++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (!track->textures[u].enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) if (track->textures[u].lookup_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) robj = track->textures[u].robj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) DRM_ERROR("No texture bound to unit %u\n", u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) for (i = 0; i <= track->textures[u].num_levels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (track->textures[u].use_pitch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (rdev->family < CHIP_R300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) w = track->textures[u].pitch / (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) w = track->textures[u].width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (rdev->family >= CHIP_RV515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) w |= track->textures[u].width_11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) w = w / (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (track->textures[u].roundup_w)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) w = roundup_pow_of_two(w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) h = track->textures[u].height;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (rdev->family >= CHIP_RV515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) h |= track->textures[u].height_11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) h = h / (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (track->textures[u].roundup_h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) h = roundup_pow_of_two(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (track->textures[u].tex_coord_type == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) d = (1 << track->textures[u].txdepth) / (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) if (!d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) d = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) d = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
			if (track->textures[u].compress_format) {
				/* compressed textures are block based */
				size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
			} else {
				size += w * h * d;
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) size *= track->textures[u].cpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) switch (track->textures[u].tex_coord_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) if (track->separate_cube) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) ret = r100_cs_track_cube(rdev, track, u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return ret;
			} else {
				size *= 6;
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) DRM_ERROR("Invalid texture coordinate type %u for unit "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) "%u\n", track->textures[u].tex_coord_type, u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (size > radeon_bo_size(robj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) DRM_ERROR("Texture of unit %u needs %lu bytes but is "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) "%lu\n", u, size, radeon_bo_size(robj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) r100_cs_track_texture_print(&track->textures[u]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
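/*
 * Validate the whole tracked state before a draw: color buffers, the z
 * buffer (only when depth is enabled), the AA resolve target, and the
 * vertex data required by the PRIM_WALK mode in VAP_VF_CNTL.  Buffers
 * are only re-checked when their dirty flag is set.
 */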
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) unsigned long size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) unsigned prim_walk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) unsigned nverts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) !track->blend_read_enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) num_cb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) for (i = 0; i < num_cb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (track->cb[i].robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) size += track->cb[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (size > radeon_bo_size(track->cb[i].robj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) DRM_ERROR("[drm] Buffer too small for color buffer %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) "(need %lu have %lu) !\n", i, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) radeon_bo_size(track->cb[i].robj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) i, track->cb[i].pitch, track->cb[i].cpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) track->cb[i].offset, track->maxy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) track->cb_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (track->zb_dirty && track->z_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (track->zb.robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) DRM_ERROR("[drm] No buffer for z buffer !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) size = track->zb.pitch * track->zb.cpp * track->maxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) size += track->zb.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (size > radeon_bo_size(track->zb.robj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) DRM_ERROR("[drm] Buffer too small for z buffer "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) "(need %lu have %lu) !\n", size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) radeon_bo_size(track->zb.robj));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) track->zb.pitch, track->zb.cpp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) track->zb.offset, track->maxy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) track->zb_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (track->aa_dirty && track->aaresolve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (track->aa.robj == NULL) {
			DRM_ERROR("[drm] No buffer for AA resolve buffer !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) /* I believe the format comes from colorbuffer0. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) size = track->aa.pitch * track->cb[0].cpp * track->maxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) size += track->aa.offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (size > radeon_bo_size(track->aa.robj)) {
			DRM_ERROR("[drm] Buffer too small for AA resolve buffer "
				  "(need %lu have %lu) !\n", size,
				  radeon_bo_size(track->aa.robj));
			DRM_ERROR("[drm] AA resolve buffer (%u %u %u %u)\n",
				  track->aa.pitch, track->cb[0].cpp,
				  track->aa.offset, track->maxy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) track->aa_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
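	/*
	 * PRIM_WALK (VAP_VF_CNTL bits [5:4]) selects where vertex data comes
	 * from: 1 = indexed vertex arrays (bounded by the largest index
	 * seen), 2 = sequential vertex arrays (bounded by the vertex count),
	 * 3 = immediate-mode data carried in the draw packet itself.  Bit 14
	 * appears to select the alternate vertex-count source.
	 */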
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (track->vap_vf_cntl & (1 << 14)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) nverts = track->vap_alt_nverts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) switch (prim_walk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) for (i = 0; i < track->num_arrays; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) size = track->arrays[i].esize * track->max_indx * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (track->arrays[i].robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) DRM_ERROR("(PW %u) Vertex array %u no buffer "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) "bound\n", prim_walk, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (size > radeon_bo_size(track->arrays[i].robj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) dev_err(rdev->dev, "(PW %u) Vertex array %u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) "need %lu dwords have %lu dwords\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) prim_walk, i, size >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) radeon_bo_size(track->arrays[i].robj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) DRM_ERROR("Max indices %u\n", track->max_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) for (i = 0; i < track->num_arrays; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) size = track->arrays[i].esize * (nverts - 1) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) if (track->arrays[i].robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) DRM_ERROR("(PW %u) Vertex array %u no buffer "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) "bound\n", prim_walk, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (size > radeon_bo_size(track->arrays[i].robj)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) dev_err(rdev->dev, "(PW %u) Vertex array %u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) "need %lu dwords have %lu dwords\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) prim_walk, i, size >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) radeon_bo_size(track->arrays[i].robj)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) size = track->vtx_size * nverts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) if (size != track->immd_dwords) {
		DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) track->immd_dwords, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) nverts, track->vtx_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) prim_walk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (track->tex_dirty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) track->tex_dirty = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return r100_cs_track_texture_check(rdev, track);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }
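
/*
 * r100_cs_track_clear() resets the command-stream tracker to pessimistic
 * worst-case defaults (maximum pitch, cpp, element size, index count and
 * so on) so that r100_cs_track_check() stays conservative until the
 * parser has recorded the state actually programmed by the IB.  A minimal
 * sketch of the expected call order in a CS parser (illustrative only,
 * not a verbatim copy of the parser code):
 *
 *	r100_cs_track_clear(rdev, track);      // start from worst case
 *	// ... decode packets, filling in track->cb[], track->zb, ...
 *	r = r100_cs_track_check(rdev, track);  // validate before emission
 *	if (r)
 *		return r;
 */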
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) unsigned i, face;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) track->cb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) track->zb_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) track->tex_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) track->aa_dirty = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (rdev->family < CHIP_R300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) track->num_cb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) if (rdev->family <= CHIP_RS200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) track->num_texture = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) track->num_texture = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) track->maxy = 2048;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) track->separate_cube = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) track->num_cb = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) track->num_texture = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) track->maxy = 4096;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) track->separate_cube = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) track->aaresolve = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) track->aa.robj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) for (i = 0; i < track->num_cb; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) track->cb[i].robj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) track->cb[i].pitch = 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) track->cb[i].cpp = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) track->cb[i].offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) track->z_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) track->zb.robj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) track->zb.pitch = 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) track->zb.cpp = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) track->zb.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) track->vtx_size = 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) track->immd_dwords = 0xFFFFFFFFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) track->num_arrays = 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) track->max_indx = 0x00FFFFFFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) for (i = 0; i < track->num_arrays; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) track->arrays[i].robj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) track->arrays[i].esize = 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) for (i = 0; i < track->num_texture; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) track->textures[i].compress_format = R100_TRACK_COMP_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) track->textures[i].pitch = 16536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) track->textures[i].width = 16536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) track->textures[i].height = 16536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) track->textures[i].width_11 = 1 << 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) track->textures[i].height_11 = 1 << 11;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) track->textures[i].num_levels = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (rdev->family <= CHIP_RS200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) track->textures[i].tex_coord_type = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) track->textures[i].txdepth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) track->textures[i].txdepth = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) track->textures[i].tex_coord_type = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) track->textures[i].cpp = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) track->textures[i].robj = NULL;
		/* CS IB emission code makes sure texture units are disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) track->textures[i].enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) track->textures[i].lookup_disable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) track->textures[i].roundup_w = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) track->textures[i].roundup_h = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (track->separate_cube)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) for (face = 0; face < 5; face++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) track->textures[i].cube_info[face].robj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) track->textures[i].cube_info[face].width = 16536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) track->textures[i].cube_info[face].height = 16536;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) track->textures[i].cube_info[face].offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * Global GPU functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) static void r100_errata(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) rdev->pll_errata = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (rdev->family == CHIP_RV100 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) rdev->family == CHIP_RS100 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) rdev->family == CHIP_RS200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
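
/*
 * Poll RBBM_STATUS until the command FIFO reports at least @n free
 * entries, waiting up to rdev->usec_timeout microseconds in 1us steps.
 * Returns 0 on success and -1 on timeout; the caller below treats a
 * timeout as a warning rather than a fatal error.
 */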
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) if (tmp >= n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) int r100_gui_wait_for_idle(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) pr_warn("radeon: wait for empty RBBM fifo failed! Bad things might happen.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) tmp = RREG32(RADEON_RBBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) if (!(tmp & RADEON_RBBM_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
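
/*
 * Wait for the memory controller to report idle in MC_STATUS.  Same
 * polling contract as r100_gui_wait_for_idle(): 0 when idle, -1 on
 * timeout.
 */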
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) int r100_mc_wait_for_idle(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) /* read MC_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) tmp = RREG32(RADEON_MC_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (tmp & RADEON_MC_IDLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) }
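
/*
 * Lockup check: if the GUI engine is idle, refresh the ring's lockup
 * tracking data and report no lockup; otherwise defer to the generic
 * ring helper, which decides whether the ring has made progress since
 * the last update.
 */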
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) u32 rbbm_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) rbbm_status = RREG32(R_000E40_RBBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) radeon_ring_lockup_update(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) return radeon_ring_test_lockup(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) void r100_enable_bm(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /* Enable bus mastering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) WREG32(RADEON_BUS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) void r100_bm_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) /* disable bus mastering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) tmp = RREG32(R_000030_BUS_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) tmp = RREG32(RADEON_BUS_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) pci_clear_master(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
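
/*
 * Soft reset sequence, as implemented below: bail out early if the GUI
 * engine is already idle, otherwise stop the MC and the CP, save PCI
 * state and disable bus mastering, soft-reset the SE/RE/PP/RB engines,
 * then the CP, and finally restore PCI state, re-enable bus mastering
 * and check RBBM_STATUS to see whether the reset took.  The @hard
 * argument is part of the common asic interface and is unused here.
 */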
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) int r100_asic_reset(struct radeon_device *rdev, bool hard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) struct r100_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) u32 status, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) status = RREG32(R_000E40_RBBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) if (!G_000E40_GUI_ACTIVE(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) r100_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) status = RREG32(R_000E40_RBBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) /* stop CP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) WREG32(RADEON_CP_CSQ_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) tmp = RREG32(RADEON_CP_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) WREG32(RADEON_CP_RB_RPTR_WR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) WREG32(RADEON_CP_RB_WPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) WREG32(RADEON_CP_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) /* save PCI state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) pci_save_state(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) /* disable bus mastering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) r100_bm_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) S_0000F0_SOFT_RESET_RE(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) S_0000F0_SOFT_RESET_PP(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) S_0000F0_SOFT_RESET_RB(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) RREG32(R_0000F0_RBBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) mdelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) status = RREG32(R_000E40_RBBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) /* reset CP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) RREG32(R_0000F0_RBBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) mdelay(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) status = RREG32(R_000E40_RBBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* restore PCI & busmastering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) pci_restore_state(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) r100_enable_bm(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) /* Check if GPU is idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) dev_err(rdev->dev, "failed to reset GPU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) ret = -1;
	} else {
		dev_info(rdev->dev, "GPU reset succeeded\n");
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) r100_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) void r100_set_common_regs(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) struct drm_device *dev = rdev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) bool force_dac2 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) /* set these so they don't interfere with anything */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) WREG32(RADEON_OV0_SCALE_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) WREG32(RADEON_SUBPIC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) WREG32(RADEON_VIPH_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) WREG32(RADEON_I2C_CNTL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) WREG32(RADEON_DVI_I2C_CNTL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) WREG32(RADEON_CAP0_TRIG_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) WREG32(RADEON_CAP1_TRIG_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
	/* Always set up DAC2 on RN50 and some RV100, as lots of
	 * servers seem to wire it up to a VGA port but don't
	 * report it in the BIOS connector table.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) switch (dev->pdev->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) /* RN50 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) case 0x515e:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) case 0x5969:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) force_dac2 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /* RV100*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) case 0x5159:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) case 0x515a:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) /* DELL triple head servers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) ((dev->pdev->subsystem_device == 0x016c) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) (dev->pdev->subsystem_device == 0x016d) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) (dev->pdev->subsystem_device == 0x016e) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) (dev->pdev->subsystem_device == 0x016f) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) (dev->pdev->subsystem_device == 0x0170) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) (dev->pdev->subsystem_device == 0x017d) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) (dev->pdev->subsystem_device == 0x017e) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) (dev->pdev->subsystem_device == 0x0183) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) (dev->pdev->subsystem_device == 0x018a) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) (dev->pdev->subsystem_device == 0x019a)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) force_dac2 = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (force_dac2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661)
		/* For CRT on DAC2, don't turn it on if the BIOS didn't
		 * enable it, even if it's detected.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /* force it to crtc0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) /* set up the TV DAC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) RADEON_TV_DAC_STD_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) RADEON_TV_DAC_RDACPD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) RADEON_TV_DAC_GDACPD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) RADEON_TV_DAC_BDACPD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) RADEON_TV_DAC_BGADJ_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) RADEON_TV_DAC_DACADJ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) RADEON_TV_DAC_NHOLD |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) RADEON_TV_DAC_STD_PS2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) (0x58 << 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) WREG32(RADEON_DAC_CNTL2, dac2_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) /* switch PM block to ACPI mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) tmp &= ~RADEON_PM_MODE_SEL;
	WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) * VRAM info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) */
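/*
 * Derive the VRAM type (DDR vs. SDR) and bus width from the strap
 * registers: IGPs always use DDR system memory, other parts report DDR
 * via MEM_SDRAM_MODE_REG, and the width comes from MEM_CNTL.  On
 * RV100/RS100/RS200, half mode means a 32-bit bus (64-bit otherwise,
 * divided by 4 again on single-CRTC variants); r2xx-class parts are 64-
 * or 128-bit depending on the channel count, and newer IGPs are assumed
 * to be 128-bit.
 */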
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) static void r100_vram_get_type(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) rdev->mc.vram_is_ddr = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) rdev->mc.vram_is_ddr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) rdev->mc.vram_is_ddr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) if ((rdev->family == CHIP_RV100) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) (rdev->family == CHIP_RS100) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) (rdev->family == CHIP_RS200)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) tmp = RREG32(RADEON_MEM_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (tmp & RV100_HALF_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) rdev->mc.vram_width = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) rdev->mc.vram_width = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) if (rdev->flags & RADEON_SINGLE_CRTC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) rdev->mc.vram_width /= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) rdev->mc.vram_is_ddr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) } else if (rdev->family <= CHIP_RV280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) tmp = RREG32(RADEON_MEM_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) rdev->mc.vram_width = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) rdev->mc.vram_width = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) /* newer IGPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) rdev->mc.vram_width = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) static u32 r100_get_accessible_vram(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) u32 aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) u8 byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
	 * that is, those that have the 2nd generation multifunction PCI
	 * interface.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) if (rdev->family == CHIP_RV280 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) rdev->family >= CHIP_RV350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) ~RADEON_HDP_APER_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) return aper_size * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
	/* Older cards have all sorts of funny issues to deal with. First
	 * check if it's a multifunction card by reading the PCI config
	 * header type... Limit those to one aperture size.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) pci_read_config_byte(rdev->pdev, 0xe, &byte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) if (byte & 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) DRM_INFO("Limiting VRAM to one aperture\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) return aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
	/* Single function older card. We read HDP_APER_CNTL to see how the
	 * BIOS has set it up. We don't write this register, as it's broken on
	 * some ASICs, but we expect the BIOS to have done the right thing
	 * (which might be too optimistic...).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) return aper_size * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) return aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
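
/*
 * On IGPs, NB_TOM encodes the range of system memory stolen for the GPU:
 * bits 15:0 hold the start address >> 16 and bits 31:16 the end address
 * >> 16, so the size computed below is ((end - start) + 1) << 16.  A
 * worked example with made-up values: tom = 0x3FFF3C00 gives
 * (0x3FFF - 0x3C00 + 1) << 16 = 0x04000000, i.e. 64 MB of stolen VRAM.
 */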
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) void r100_vram_init_sizes(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) u64 config_aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) /* work out accessible VRAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) /* FIXME we don't use the second aperture yet when we could use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) rdev->mc.visible_vram_size = rdev->mc.aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) uint32_t tom;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) /* read NB_TOM to get the amount of ram stolen for the GPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) tom = RREG32(RADEON_NB_TOM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
		/* Some production boards of the M6 will report 0
		 * if it's 8 MB.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if (rdev->mc.real_vram_size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) rdev->mc.real_vram_size = 8192 * 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
		/* Fix for RN50, M6, M7 with 8/16/32(??) MB of VRAM -
		 * Novell bug 204882, along with lots of Ubuntu ones.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (rdev->mc.aper_size > config_aper_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) config_aper_size = rdev->mc.aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) if (config_aper_size > rdev->mc.real_vram_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) rdev->mc.mc_vram_size = config_aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) void r100_vga_set_state(struct radeon_device *rdev, bool state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) uint32_t temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) temp = RREG32(RADEON_CONFIG_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) if (!state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) temp &= ~RADEON_CFG_VGA_RAM_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) temp |= RADEON_CFG_VGA_IO_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) temp &= ~RADEON_CFG_VGA_IO_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) WREG32(RADEON_CONFIG_CNTL, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
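
/*
 * Program the MC address-space layout: VRAM is placed at the PCI
 * aperture base (or, on IGPs, at the NB_TOM start address, since the
 * "VRAM" there is stolen system memory), and the GTT is placed
 * separately unless AGP already dictates its location.
 */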
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) static void r100_mc_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) u64 base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) r100_vram_get_type(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) r100_vram_init_sizes(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) base = rdev->mc.aper_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) radeon_vram_location(rdev, &rdev->mc, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) rdev->mc.gtt_base_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) if (!(rdev->flags & RADEON_IS_AGP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) radeon_gtt_location(rdev, &rdev->mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) radeon_update_bandwidth_info(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) * Indirect registers accessor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) void r100_pll_errata_after_index(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) (void)RREG32(RADEON_CLOCK_CNTL_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) (void)RREG32(RADEON_CRTC_GEN_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) static void r100_pll_errata_after_data(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) {
	/* This workaround is necessary on RV100, RS100 and RS200 chips,
	 * or the chip could hang on a subsequent access.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) mdelay(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
	/* This function is required to work around a hardware bug in some
	 * (all?) revisions of the R300.  This workaround should be called
	 * after every CLOCK_CNTL_INDEX register access.  If not, register
	 * reads afterward may not be correct.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) uint32_t save, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) save = RREG32(RADEON_CLOCK_CNTL_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) WREG32(RADEON_CLOCK_CNTL_INDEX, save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
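
/*
 * PLL registers live behind an index/data pair rather than in the flat
 * MMIO space: the register number goes into CLOCK_CNTL_INDEX (with
 * RADEON_PLL_WR_EN set for writes) and the payload moves through
 * CLOCK_CNTL_DATA, with the errata hooks above applied around each
 * access.  Simplified sketch of a read (the real accessors below also
 * take pll_idx_lock to keep the index/data sequence atomic):
 *
 *	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
 *	data = RREG32(RADEON_CLOCK_CNTL_DATA);
 */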
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) uint32_t data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) spin_lock_irqsave(&rdev->pll_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) r100_pll_errata_after_index(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) data = RREG32(RADEON_CLOCK_CNTL_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) r100_pll_errata_after_data(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) spin_lock_irqsave(&rdev->pll_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) r100_pll_errata_after_index(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) WREG32(RADEON_CLOCK_CNTL_DATA, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) r100_pll_errata_after_data(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
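
/*
 * Select the per-family "safe register" bitmap used by the CS checker to
 * decide which registers userspace may touch: RN50 gets its own table,
 * other pre-R200 parts use the r100 table, and everything newer is
 * delegated to r200_set_safe_registers().
 */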
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) static void r100_set_safe_registers(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) if (ASIC_IS_RN50(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) } else if (rdev->family < CHIP_R200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) r200_set_safe_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) * Debugfs info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) */
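/*
 * The debugfs entries below dump raw hardware state (RBBM status, CP
 * ring contents, CSQ fifos and MC setup) for bring-up and hang triage.
 * Reading them performs live register accesses, so they are only built
 * with CONFIG_DEBUG_FS.
 */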
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) #if defined(CONFIG_DEBUG_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) struct drm_info_node *node = (struct drm_info_node *) m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) struct drm_device *dev = node->minor->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) struct radeon_device *rdev = dev->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) uint32_t reg, value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) for (i = 0; i < 64; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) struct drm_info_node *node = (struct drm_info_node *) m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) struct drm_device *dev = node->minor->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) struct radeon_device *rdev = dev->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) uint32_t rdp, wdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) unsigned count, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) radeon_ring_free_size(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) rdp = RREG32(RADEON_CP_RB_RPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) wdp = RREG32(RADEON_CP_RB_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) seq_printf(m, "%u dwords in ring\n", count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) if (ring->ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) for (j = 0; j <= count; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) i = (rdp + j) & ring->ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) struct drm_info_node *node = (struct drm_info_node *) m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) struct drm_device *dev = node->minor->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) struct radeon_device *rdev = dev->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) uint32_t csq_stat, csq2_stat, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) csq_stat = RREG32(RADEON_CP_CSQ_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) r_rptr = (csq_stat >> 0) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) r_wptr = (csq_stat >> 10) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) ib1_rptr = (csq_stat >> 20) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) ib1_wptr = (csq2_stat >> 0) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) ib2_rptr = (csq2_stat >> 10) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) ib2_wptr = (csq2_stat >> 20) & 0x3ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) seq_printf(m, "Ring rptr %u\n", r_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) seq_printf(m, "Ring wptr %u\n", r_wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
	/* FIXME: the boundaries 0, 128 and 640 depend on the fifo setup; see
	 * cp_init_kms: 128 = indirect1_start * 8 and 640 = indirect2_start * 8.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) seq_printf(m, "Ring fifo:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) for (i = 0; i < 256; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) WREG32(RADEON_CP_CSQ_ADDR, i << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) tmp = RREG32(RADEON_CP_CSQ_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) seq_printf(m, "Indirect1 fifo:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) for (i = 256; i <= 512; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) WREG32(RADEON_CP_CSQ_ADDR, i << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) tmp = RREG32(RADEON_CP_CSQ_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) seq_printf(m, "Indirect2 fifo:\n");
	for (i = 640; i < ib2_wptr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) WREG32(RADEON_CP_CSQ_ADDR, i << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) tmp = RREG32(RADEON_CP_CSQ_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) static int r100_debugfs_mc_info(struct seq_file *m, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) struct drm_info_node *node = (struct drm_info_node *) m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) struct drm_device *dev = node->minor->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) struct radeon_device *rdev = dev->dev_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) uint32_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) tmp = RREG32(RADEON_CONFIG_MEMSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) tmp = RREG32(RADEON_MC_FB_LOCATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) tmp = RREG32(RADEON_BUS_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) tmp = RREG32(RADEON_MC_AGP_LOCATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) tmp = RREG32(RADEON_AGP_BASE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) tmp = RREG32(RADEON_HOST_PATH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) tmp = RREG32(0x01D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) tmp = RREG32(RADEON_AIC_LO_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) tmp = RREG32(RADEON_AIC_HI_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) tmp = RREG32(0x01E4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) static struct drm_info_list r100_debugfs_rbbm_list[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) {"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) static struct drm_info_list r100_debugfs_cp_list[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) {"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) {"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) static struct drm_info_list r100_debugfs_mc_info_list[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) int r100_debugfs_rbbm_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) #if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list,
					ARRAY_SIZE(r100_debugfs_rbbm_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) int r100_debugfs_cp_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) #if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list,
					ARRAY_SIZE(r100_debugfs_cp_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) int r100_debugfs_mc_info_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) #if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list,
					ARRAY_SIZE(r100_debugfs_mc_info_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) }
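
/*
 * Surface registers describe tiled and/or byte-swapped VRAM apertures;
 * each surface occupies a 16-byte register block (hence reg * 16 below)
 * holding an INFO word plus lower/upper bounds.  The tiling flag
 * encoding differs per family, and the pitch is packed into the low bits
 * of the INFO word (pitch/16 on r100/r200, pitch/8 on r300 and newer).
 */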
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) int r100_set_surface_reg(struct radeon_device *rdev, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) uint32_t tiling_flags, uint32_t pitch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) uint32_t offset, uint32_t obj_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) {
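	/* each surface's register block (INFO, LOWER_BOUND, UPPER_BOUND) is 16 bytes apart */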
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) int surf_index = reg * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) int flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) if (rdev->family <= CHIP_RS200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) flags |= RADEON_SURF_TILE_COLOR_BOTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) if (tiling_flags & RADEON_TILING_MACRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) flags |= RADEON_SURF_TILE_COLOR_MACRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) /* setting pitch to 0 disables tiling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) pitch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) } else if (rdev->family <= CHIP_RV280) {
		if (tiling_flags & RADEON_TILING_MACRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) flags |= R200_SURF_TILE_COLOR_MACRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (tiling_flags & RADEON_TILING_MICRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) flags |= R200_SURF_TILE_COLOR_MICRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (tiling_flags & RADEON_TILING_MACRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) flags |= R300_SURF_TILE_MACRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (tiling_flags & RADEON_TILING_MICRO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) flags |= R300_SURF_TILE_MICRO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
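	/* aperture byte-swap flags; mainly of interest on big-endian hosts */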
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (tiling_flags & RADEON_TILING_SWAP_16BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) if (tiling_flags & RADEON_TILING_SWAP_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
	/* r100/r200 divide pitch by 16, r300+ divides by 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) if (rdev->family < CHIP_R300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) flags |= pitch / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) flags |= pitch / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) int surf_index = reg * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) void r100_bandwidth_update(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) fixed20_12 crit_point_ff = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) fixed20_12 memtcas_ff[8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) dfixed_init(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) dfixed_init(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) dfixed_init(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) dfixed_init(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) dfixed_init_half(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) dfixed_init_half(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) dfixed_init(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) fixed20_12 memtcas_rs480_ff[8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) dfixed_init(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) dfixed_init(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) dfixed_init(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) dfixed_init(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) dfixed_init(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) dfixed_init_half(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) dfixed_init_half(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) dfixed_init_half(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) fixed20_12 memtcas2_ff[8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) dfixed_init(0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) dfixed_init(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) dfixed_init(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) dfixed_init(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) dfixed_init(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) dfixed_init(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) dfixed_init(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) dfixed_init(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) fixed20_12 memtrbs[8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) dfixed_init(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) dfixed_init_half(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) dfixed_init(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) dfixed_init_half(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) dfixed_init(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) dfixed_init_half(3),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) dfixed_init(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) dfixed_init_half(4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) fixed20_12 memtrbs_r4xx[8] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) dfixed_init(4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) dfixed_init(5),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) dfixed_init(6),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) dfixed_init(7),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) dfixed_init(8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) dfixed_init(9),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) dfixed_init(10),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) dfixed_init(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) fixed20_12 min_mem_eff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) fixed20_12 cur_latency_mclk, cur_latency_sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate = {0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) disp_drain_rate2, read_return_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) fixed20_12 time_disp1_drop_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) int c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) int cur_size = 16; /* in octawords */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) int critical_point = 0, critical_point2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) int stop_req, max_stop_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) struct drm_display_mode *mode1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) struct drm_display_mode *mode2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) uint32_t pixel_bytes1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) uint32_t pixel_bytes2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) /* Guess line buffer size to be 8192 pixels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) u32 lb_size = 8192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) if (!rdev->mode_info.mode_config_initialized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) radeon_update_display_priority(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (rdev->mode_info.crtcs[0]->base.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) const struct drm_framebuffer *fb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) rdev->mode_info.crtcs[0]->base.primary->fb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) mode1 = &rdev->mode_info.crtcs[0]->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) pixel_bytes1 = fb->format->cpp[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) if (rdev->mode_info.crtcs[1]->base.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) const struct drm_framebuffer *fb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) rdev->mode_info.crtcs[1]->base.primary->fb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) mode2 = &rdev->mode_info.crtcs[1]->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) pixel_bytes2 = fb->format->cpp[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
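	/* dfixed_const_8(0) is ~0.8: assume at best 80% memory efficiency */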
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) min_mem_eff.full = dfixed_const_8(0);
	/* when display priority is forced high, program the MC display request init latency for the enabled CRTCs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) /* check crtc enables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) if (mode2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) if (mode1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) /*
	 * determine if there is enough bandwidth for the current mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) sclk_ff = rdev->pm.sclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) mclk_ff = rdev->pm.mclk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) temp_ff.full = dfixed_const(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) pix_clk.full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) pix_clk2.full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) peak_disp_bw.full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (mode1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) temp_ff.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) pix_clk.full = dfixed_div(pix_clk, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) temp_ff.full = dfixed_const(pixel_bytes1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) if (mode2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) temp_ff.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) temp_ff.full = dfixed_const(pixel_bytes2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for the current mode\n"
			  "If you see flickering, try lowering the resolution, refresh rate, or color depth\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
	/* Get memory timing values from the MEM_TIMING_CNTL register and convert them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) temp = RREG32(RADEON_MEM_TIMING_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) mem_trcd = ((temp >> 2) & 0x3) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) mem_trp = ((temp & 0x3)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) mem_tras = ((temp & 0x70) >> 4) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) } else if (rdev->family == CHIP_R300 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) rdev->family == CHIP_R350) { /* r300, r350 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) mem_trcd = (temp & 0x7) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) mem_trp = ((temp >> 8) & 0x7) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) mem_tras = ((temp >> 11) & 0xf) + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) } else if (rdev->family == CHIP_RV350 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) rdev->family == CHIP_RV380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) /* rv3x0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) mem_trcd = (temp & 0x7) + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) mem_trp = ((temp >> 8) & 0x7) + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) mem_tras = ((temp >> 11) & 0xf) + 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) } else if (rdev->family == CHIP_R420 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) rdev->family == CHIP_R423 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) rdev->family == CHIP_RV410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) /* r4xx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) mem_trcd = (temp & 0xf) + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) if (mem_trcd > 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) mem_trcd = 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) mem_trp = ((temp >> 8) & 0xf) + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (mem_trp > 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) mem_trp = 15;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) mem_tras = ((temp >> 12) & 0x1f) + 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) if (mem_tras > 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) mem_tras = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) } else { /* RV200, R200 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) mem_trcd = (temp & 0x7) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) mem_trp = ((temp >> 8) & 0x7) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) mem_tras = ((temp >> 12) & 0xf) + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) /* convert to FF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) trcd_ff.full = dfixed_const(mem_trcd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) trp_ff.full = dfixed_const(mem_trp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) tras_ff.full = dfixed_const(mem_tras);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) data = (temp & (7 << 20)) >> 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* rs480 only; rs400 doesn't seem to use this table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) tcas_ff = memtcas_rs480_ff[data];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) tcas_ff = memtcas_ff[data];
	} else {
		tcas_ff = memtcas2_ff[data];
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (rdev->family == CHIP_RS400 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) rdev->family == CHIP_RS480) {
		/* extra CAS latency is stored in bits 23-25, 0-4 clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) data = (temp >> 23) & 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (data < 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) tcas_ff.full += dfixed_const(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
		/* on the R300, Tcas is included in Trbs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) temp = RREG32(RADEON_MEM_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) data = (R300_MEM_NUM_CHANNELS_MASK & temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) if (data == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) if (R300_MEM_USE_CD_CH_ONLY & temp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) temp = RREG32(R300_MC_IND_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) temp &= ~R300_MC_IND_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) temp |= R300_MC_READ_CNTL_CD_mcind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) WREG32(R300_MC_IND_INDEX, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) temp = RREG32(R300_MC_IND_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) data = (R300_MEM_RBS_POSITION_C_MASK & temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) temp = RREG32(R300_MC_READ_CNTL_AB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) data = (R300_MEM_RBS_POSITION_A_MASK & temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) temp = RREG32(R300_MC_READ_CNTL_AB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) data = (R300_MEM_RBS_POSITION_A_MASK & temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) if (rdev->family == CHIP_RV410 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) rdev->family == CHIP_R420 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) rdev->family == CHIP_R423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) trbs_ff = memtrbs_r4xx[data];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) trbs_ff = memtrbs[data];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) tcas_ff.full += trbs_ff.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) sclk_eff_ff.full = sclk_ff.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) fixed20_12 agpmode_ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) agpmode_ff.full = dfixed_const(radeon_agpmode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) temp_ff.full = dfixed_const_666(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) /* TODO PCIE lanes may affect this - agpmode == 16?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) if (ASIC_IS_R300(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) sclk_delay_ff.full = dfixed_const(250);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if ((rdev->family == CHIP_RV100) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (rdev->mc.vram_is_ddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) sclk_delay_ff.full = dfixed_const(41);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) sclk_delay_ff.full = dfixed_const(33);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) if (rdev->mc.vram_width == 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) sclk_delay_ff.full = dfixed_const(57);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) sclk_delay_ff.full = dfixed_const(41);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) if (rdev->mc.vram_is_ddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) if (rdev->mc.vram_width == 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) k1.full = dfixed_const(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) c = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) k1.full = dfixed_const(20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) c = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) k1.full = dfixed_const(40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) c = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
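	/*
	 * Worst-case MC latency on the memory-clock side, in memory clocks:
	 *   2 * tRCD + c * tCAS + 4 * (tRAS + tRP) + k1
	 * It is converted to time below by dividing by mclk, with a small
	 * sclk-side term of 4 / sclk_eff added on top.
	 */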
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) temp_ff.full = dfixed_const(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) temp_ff.full = dfixed_const(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) temp_ff.full = dfixed_const(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) mc_latency_mclk.full += k1.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) /*
	HW cursor fetch time, assuming the worst case of a full-size colour cursor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) temp_ff.full += trcd_ff.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) if (temp_ff.full < tras_ff.full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) temp_ff.full = tras_ff.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) temp_ff.full = dfixed_const(cur_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) Find the total latency for the display data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) disp_latency_overhead.full = dfixed_const(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) if (mc_latency_mclk.full > mc_latency_sclk.full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) disp_latency.full = mc_latency_mclk.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) disp_latency.full = mc_latency_sclk.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
	/* set up the max GRPH_STOP_REQ default value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) if (ASIC_IS_RV100(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) max_stop_req = 0x5c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) max_stop_req = 0x7c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if (mode1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) /* CRTC1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) stop_req = mode1->hdisplay * pixel_bytes1 / 16;
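		/* e.g. a 1280-wide 32 bpp mode gives 1280 * 4 / 16 = 320 (0x140),
		   well above max_stop_req, so it gets clamped below */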
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) if (stop_req > max_stop_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) stop_req = max_stop_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) Find the drain rate of the display buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) temp_ff.full = dfixed_const((16/pixel_bytes1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
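		/* i.e. pix_clk * pixel_bytes1 / 16: octawords consumed per microsecond */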
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) Find the critical point of the display buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) crit_point_ff.full += dfixed_const_half(0);
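		/* the 0.5 makes the truncation below round to the nearest octaword */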
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) critical_point = dfixed_trunc(crit_point_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) if (rdev->disp_priority == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) critical_point = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) The critical point should never be above max_stop_req-4. Setting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) if (max_stop_req - critical_point < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) critical_point = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0 when CRTC2 is enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) critical_point = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) temp &= ~(RADEON_GRPH_START_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) if ((rdev->family == CHIP_R350) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) (stop_req > 0x15)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) stop_req -= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) temp |= RADEON_GRPH_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) RADEON_GRPH_CRITICAL_AT_SOF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) RADEON_GRPH_STOP_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) Write the result into the register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) if ((rdev->family == CHIP_RS400) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) (rdev->family == CHIP_RS480)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) /* attempt to program RS400 disp regs correctly ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) temp = RREG32(RS400_DISP1_REG_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) RS400_DISP1_STOP_REQ_LEVEL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) WREG32(RS400_DISP1_REQ_CNTL1, (temp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) temp = RREG32(RS400_DMIF_MEM_CNTL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) RS400_DISP1_CRITICAL_POINT_STOP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) WREG32(RS400_DMIF_MEM_CNTL1, (temp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542)
		DRM_DEBUG_KMS("GRPH_BUFFER_CNTL now %x\n",
			      (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) if (mode2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) u32 grph2_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) stop_req = mode2->hdisplay * pixel_bytes2 / 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (stop_req > max_stop_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) stop_req = max_stop_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) Find the drain rate of the display buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) temp_ff.full = dfixed_const((16/pixel_bytes2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) if ((rdev->family == CHIP_R350) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) (stop_req > 0x15)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) stop_req -= 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) RADEON_GRPH_CRITICAL_AT_SOF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) RADEON_GRPH_STOP_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
		if ((rdev->family == CHIP_RS100) ||
		    (rdev->family == CHIP_RS200)) {
			critical_point2 = 0;
		} else {
			/* DDR effectively doubles the bus width here */
			temp = (rdev->mc.vram_width * (rdev->mc.vram_is_ddr + 1)) / 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) temp_ff.full = dfixed_const(temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) if (sclk_ff.full < temp_ff.full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) temp_ff.full = sclk_ff.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) read_return_rate.full = temp_ff.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) if (mode1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) temp_ff.full = read_return_rate.full - disp_drain_rate.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) time_disp1_drop_priority.full = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) crit_point_ff.full += dfixed_const_half(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) critical_point2 = dfixed_trunc(crit_point_ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) if (rdev->disp_priority == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) critical_point2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) if (max_stop_req - critical_point2 < 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) critical_point2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have a problem with this set to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) critical_point2 = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) if ((rdev->family == CHIP_RS400) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) (rdev->family == CHIP_RS480)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) #if 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) /* attempt to program RS400 disp2 regs correctly ??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) temp = RREG32(RS400_DISP2_REQ_CNTL1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) RS400_DISP2_STOP_REQ_LEVEL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) WREG32(RS400_DISP2_REQ_CNTL1, (temp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) temp = RREG32(RS400_DISP2_REQ_CNTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) RS400_DISP2_CRITICAL_POINT_STOP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) WREG32(RS400_DISP2_REQ_CNTL2, (temp |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638)
		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL now %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) /* Save number of lines the linebuffer leads before the scanout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) if (mode1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) if (mode2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) uint32_t scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) uint32_t tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657)
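	/*
	 * Basic CP sanity check: seed a scratch register with 0xCAFEDEAD,
	 * have the ring write 0xDEADBEEF to it, then poll until the value
	 * lands or the usec timeout expires.
	 */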
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) r = radeon_scratch_get(rdev, &scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) WREG32(scratch, 0xCAFEDEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) r = radeon_ring_lock(rdev, ring, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) radeon_scratch_free(rdev, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) radeon_ring_write(ring, PACKET0(scratch, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) radeon_ring_write(ring, 0xDEADBEEF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) tmp = RREG32(scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) if (tmp == 0xDEADBEEF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) if (i < rdev->usec_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) DRM_INFO("ring test succeeded in %d usecs\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) scratch, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) radeon_scratch_free(rdev, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) if (ring->rptr_save_reg) {
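		/* 2 dwords for this rptr-save write plus 3 for the IB dispatch below */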
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) u32 next_rptr = ring->wptr + 2 + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) radeon_ring_write(ring, next_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) radeon_ring_write(ring, ib->gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) radeon_ring_write(ring, ib->length_dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) struct radeon_ib ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) uint32_t scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) uint32_t tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) r = radeon_scratch_get(rdev, &scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) WREG32(scratch, 0xCAFEDEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) DRM_ERROR("radeon: failed to get ib (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) goto free_scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) ib.ptr[0] = PACKET0(scratch, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) ib.ptr[1] = 0xDEADBEEF;
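	/* pad the IB to 8 dwords with type-2 NOP packets */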
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) ib.ptr[2] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) ib.ptr[3] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) ib.ptr[4] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) ib.ptr[5] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) ib.ptr[6] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) ib.ptr[7] = PACKET2(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) ib.length_dw = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) r = radeon_ib_schedule(rdev, &ib, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) goto free_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) RADEON_USEC_IB_TEST_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) DRM_ERROR("radeon: fence wait failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) goto free_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) } else if (r == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) DRM_ERROR("radeon: fence wait timed out.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) r = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) goto free_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) tmp = RREG32(scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) if (tmp == 0xDEADBEEF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) if (i < rdev->usec_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) DRM_INFO("ib test succeeded in %u usecs\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) scratch, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) free_ib:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) radeon_ib_free(rdev, &ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) free_scratch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) radeon_scratch_free(rdev, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) {
	/* Shut down the CP. We shouldn't need to do this, but better safe
	 * than sorry.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) WREG32(R_000740_CP_CSQ_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778)
	/* Save a few CRTC registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) save->GENMO_WT = RREG8(R_0003C2_GENMO_WT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) /* Disable VGA aperture access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) /* Disable cursor, overlay, crtc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) S_000054_CRTC_DISPLAY_DIS(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) WREG32(R_000050_CRTC_GEN_CNTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) S_000050_CRTC_DISP_REQ_EN_B(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) WREG32(R_000420_OV0_SCALE_CNTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) S_000360_CUR2_LOCK(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) WREG32(R_0003F8_CRTC2_GEN_CNTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) (C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) S_0003F8_CRTC2_DISPLAY_DIS(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) S_0003F8_CRTC2_DISP_REQ_EN_B(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) WREG32(R_000360_CUR2_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) C_000360_CUR2_LOCK & save->CUR2_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) {
	/* Update the display base address for the CRTC(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) /* Restore CRTC registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) void r100_vga_render_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
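	/* clear VGA_RAM_EN so the VGA engine stops rendering into memory */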
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) tmp = RREG8(R_0003C2_GENMO_WT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) static void r100_debugfs(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) r = r100_debugfs_mc_info_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) static void r100_mc_program(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) struct r100_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849)
	/* Stop all MC clients */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) r100_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) WREG32(R_00014C_MC_AGP_LOCATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) if (rdev->family > CHIP_RV200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) WREG32(R_00015C_AGP_BASE_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) upper_32_bits(rdev->mc.agp_base) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) WREG32(R_000170_AGP_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) if (rdev->family > CHIP_RV200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) WREG32(R_00015C_AGP_BASE_2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) /* Wait for mc idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) if (r100_mc_wait_for_idle(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) dev_warn(rdev->dev, "Wait for MC idle timeout.\n");
	/* Program the MC; the address space is limited to 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) WREG32(R_000148_MC_FB_LOCATION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) r100_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
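/**
 * r100_clock_startup - bring up the engine clocks
 * @rdev: radeon_device pointer
 *
 * Optionally enable dynamic clock gating, then force on the clocks
 * that must stay running (CP, VIP, and both displays on RV250/RV280).
 */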
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) static void r100_clock_startup(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) if (radeon_dynclks != -1 && radeon_dynclks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force some of the blocks on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889)
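/**
 * r100_startup - start up the hardware blocks needed for acceleration
 * @rdev: radeon_device pointer
 *
 * Program the MC and clocks, enable bus mastering and the GART, then
 * bring up writeback, fences, IRQs, the CP ring and the IB pool.
 * Called from both first-time init and resume.
 *
 * Returns 0 on success, a negative error code on failure.
 */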
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) static int r100_startup(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) /* set common regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) r100_set_common_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) /* program mc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) r100_mc_program(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) /* Resume clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) r100_clock_startup(rdev);
	/* Initialize the GART (initialized after TTM so that we can
	 * allocate memory through TTM, but finalized after TTM) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) r100_enable_bm(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) if (rdev->flags & RADEON_IS_PCI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) r = r100_pci_gart_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) /* allocate wb buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) r = radeon_wb_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) /* Enable IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) r = radeon_irq_kms_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) r100_irq_set(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) /* 1M ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) r = r100_cp_init(rdev, 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) r = radeon_ib_pool_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944)
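/**
 * r100_resume - resume the device
 * @rdev: radeon_device pointer
 *
 * Disable the GART, reset and re-post the GPU, then run the regular
 * startup sequence.
 *
 * Returns 0 on success, a negative error code on failure.
 */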
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) int r100_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
	/* Make sure the GART is not active */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) if (rdev->flags & RADEON_IS_PCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) r100_pci_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) /* Resume clock before doing reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) r100_clock_startup(rdev);
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) RREG32(R_000E40_RBBM_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) RREG32(R_0007C0_CP_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) /* post */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) radeon_combios_asic_init(rdev->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) /* Resume clock after posting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) r100_clock_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) /* Initialize surface registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) radeon_surface_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) r = r100_startup(rdev);
	if (r)
		rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974)
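/**
 * r100_suspend - quiesce the device for suspend
 * @rdev: radeon_device pointer
 *
 * Shut down power management, the CP, writeback, interrupts and
 * the GART.
 */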
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) int r100_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) radeon_pm_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) r100_cp_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) radeon_wb_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) r100_irq_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (rdev->flags & RADEON_IS_PCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) r100_pci_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
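/**
 * r100_fini - tear down the device
 * @rdev: radeon_device pointer
 *
 * Release everything set up by r100_init() and free the BIOS copy.
 */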
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) void r100_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) radeon_pm_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) r100_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) radeon_gem_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) if (rdev->flags & RADEON_IS_PCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) r100_pci_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) radeon_agp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) radeon_fence_driver_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) radeon_bo_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) radeon_atombios_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) kfree(rdev->bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) rdev->bios = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) * Due to how kexec works, it can leave the hw fully initialised when it
 * boots the new kernel. However, running our init sequence with the CP and
 * WB already set up causes GPU hangs, on the RN50 at least. So at startup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) * do some quick sanity checks and restore sane values to avoid this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) * problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) void r100_restore_sanity(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014)
	tmp = RREG32(RADEON_CP_CSQ_CNTL);
	if (tmp)
		WREG32(RADEON_CP_CSQ_CNTL, 0);
	tmp = RREG32(RADEON_CP_RB_CNTL);
	if (tmp)
		WREG32(RADEON_CP_RB_CNTL, 0);
	tmp = RREG32(RADEON_SCRATCH_UMSK);
	if (tmp)
		WREG32(RADEON_SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028)
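/**
 * r100_init - one-time device initialization
 * @rdev: radeon_device pointer
 *
 * Set up debugfs, BIOS, clocks, AGP, VRAM, fences, the memory manager
 * and the GART, then attempt to start acceleration.  A failure to
 * start acceleration is not fatal: the device remains usable for
 * modesetting with accel_working cleared.
 *
 * Returns 0 on success, a negative error code on fatal errors.
 */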
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) int r100_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) /* Register debugfs file specific to this group of asics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) r100_debugfs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) /* Disable VGA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) r100_vga_render_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) /* Initialize scratch registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) radeon_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) /* Initialize surface registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) radeon_surface_init(rdev);
	/* sanity check some registers to avoid hangs after things like kexec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) r100_restore_sanity(rdev);
	/* TODO: disabling VGA properly requires going through a VGA request */
	/* BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) if (!radeon_get_bios(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) if (ASIC_IS_AVIVO(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) if (rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting combios for R100-family GPU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) r = radeon_combios_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) }
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) RREG32(R_000E40_RBBM_STATUS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) RREG32(R_0007C0_CP_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) }
	/* check whether the card is posted */
	if (!radeon_boot_test_post_card(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) /* Set asic errata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) r100_errata(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) /* Initialize clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) radeon_get_clock_info(rdev->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) /* initialize AGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) /* initialize VRAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) r100_mc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) /* Fence driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) r = radeon_fence_driver_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) /* Memory manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) r = radeon_bo_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) if (rdev->flags & RADEON_IS_PCI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) r = r100_pci_gart_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) r100_set_safe_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) /* Initialize power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) radeon_pm_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) r = r100_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) if (r) {
		/* Something went wrong with the accel init, so stop accel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) dev_err(rdev->dev, "Disabling GPU acceleration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) r100_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) if (rdev->flags & RADEON_IS_PCI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) r100_pci_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113)
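/**
 * r100_mm_rreg_slow - read an MMIO register via the index/data pair
 * @rdev: radeon_device pointer
 * @reg: register offset
 *
 * Slow path for registers that lie outside the directly mapped MMIO
 * window: the MM_INDEX/MM_DATA pair is shared, so it is protected by
 * mmio_idx_lock.
 */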
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) uint32_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125)
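/**
 * r100_mm_wreg_slow - write an MMIO register via the index/data pair
 * @rdev: radeon_device pointer
 * @reg: register offset
 * @v: value to write
 *
 * Slow-path counterpart of r100_mm_rreg_slow() for writes.
 */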
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135)
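/**
 * r100_io_rreg - read a register through the PCI I/O BAR
 * @rdev: radeon_device pointer
 * @reg: register offset
 *
 * Registers beyond the I/O aperture are reached indirectly through
 * the MM_INDEX/MM_DATA pair.
 */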
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) {
	if (reg < rdev->rio_mem_size)
		return ioread32(rdev->rio_mem + reg);

	iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
	return ioread32(rdev->rio_mem + RADEON_MM_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145)
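/**
 * r100_io_wreg - write a register through the PCI I/O BAR
 * @rdev: radeon_device pointer
 * @reg: register offset
 * @v: value to write
 *
 * Counterpart of r100_io_rreg() for writes.
 */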
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) {
	if (reg < rdev->rio_mem_size) {
		iowrite32(v, rdev->rio_mem + reg);
	} else {
		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
		iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) }