/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>

#include "atom.h"
#include "avivod.h"
#include "r600d.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_mode.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

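/*
 * Per-CRTC register block offsets: entry 0 is CRTC0's base, entry 1 is
 * the distance from the D1 to the D2 CRTC register block, which serves
 * as the per-CRTC register stride.
 */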
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
extern int evergreen_rlc_resume(struct radeon_device *rdev);
extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);

/*
 * Indirect register accessors
 */
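/*
 * Each accessor pair below follows the usual index/data protocol: the
 * register offset is written to an INDEX register, then the payload is
 * read or written through the matching DATA register, with a spinlock
 * keeping the two-step sequence atomic against concurrent accessors.
 */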
u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	r = RREG32(R600_RCU_DATA);
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
	return r;
}

void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
	WREG32(R600_RCU_DATA, (v));
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}

u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(R600_UVD_CTX_DATA);
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
	return r;
}

void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(R600_UVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}

/**
 * r600_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int r600_get_allowed_info_register(struct radeon_device *rdev,
				   u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case R_000E50_SRBM_STATUS:
	case DMA_STATUS_REG:
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}
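/*
 * Only the status registers whitelisted above are readable this way;
 * the expected caller is the driver's info ioctl (e.g. a
 * RADEON_INFO_READ_REG style query), so userspace never gets raw
 * register access through this path.
 */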

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
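/*
 * Rough arithmetic sketch for the divider setup above (UVD clocks are
 * handled in 10 kHz units, so the 50000/160000 bounds passed to
 * radeon_uvd_calc_upll_dividers() pin the VCO between roughly 0.5 and
 * 1.6 GHz): the UPLL output is about ref_freq * fb_div / ref_div,
 * modulo the per-family fb_div tweak, and vclk_div/dclk_div then
 * divide that VCO frequency back down to the requested vclk and dclk.
 */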

void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

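	/*
	 * temp is a 9-bit two's complement reading in degrees C: e.g. a
	 * raw value of 0x1F0 gives actual_temp = 0xF0 = 240 with the sign
	 * bit (0x100) set, so the result is (240 - 256) * 1000 = -16000
	 * millidegrees.
	 */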
	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

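	/*
	 * Selection sketch: on IGPs and the original R600 each power state
	 * is used with a single clock mode, so the planned action walks
	 * the power state array itself; on other asics a power state is
	 * picked once and the action walks its clock modes instead (see
	 * the else branch below).
	 */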
	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

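/*
 * The profile tables below pick, per profile, a power state index
 * (ps_idx) and a clock mode index (cm_idx) for both the DPMS-off and
 * DPMS-on cases; SH and MH are the single-head and multi-head variants
 * of the low/mid/high profiles.
 */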
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) /* high mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /* default */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /* low sh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) if (rdev->flags & RADEON_IS_MOBILITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /* mid sh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) /* high sh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /* low mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if (rdev->flags & RADEON_IS_MOBILITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) /* mid mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) /* high mh */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) void r600_pm_misc(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) int req_ps_idx = rdev->pm.requested_power_state_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) int req_cm_idx = rdev->pm.requested_clock_mode_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (voltage->voltage == 0xff01)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) return;
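		/* only program the regulator when the requested voltage
		 * differs from the cached value */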
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) if (voltage->voltage != rdev->pm.current_vddc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) rdev->pm.current_vddc = voltage->voltage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) bool r600_gui_idle(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
	return !(RREG32(GRBM_STATUS) & GUI_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /* hpd for digital panel detect/disconnect */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) bool connected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (ASIC_IS_DCE3(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) switch (hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) case RADEON_HPD_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /* DCE 3.2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) case RADEON_HPD_5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) case RADEON_HPD_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) switch (hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return connected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) void r600_hpd_set_polarity(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) enum radeon_hpd_id hpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) bool connected = r600_hpd_sense(rdev, hpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) if (ASIC_IS_DCE3(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) switch (hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) tmp = RREG32(DC_HPD1_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) tmp &= ~DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) tmp |= DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) WREG32(DC_HPD1_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) tmp = RREG32(DC_HPD2_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) tmp &= ~DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) tmp |= DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) WREG32(DC_HPD2_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) tmp = RREG32(DC_HPD3_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) tmp &= ~DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) tmp |= DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) WREG32(DC_HPD3_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) case RADEON_HPD_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) tmp = RREG32(DC_HPD4_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) tmp &= ~DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) tmp |= DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) WREG32(DC_HPD4_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
		/* DCE 3.2 */
		case RADEON_HPD_5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) tmp = RREG32(DC_HPD5_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) tmp &= ~DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) tmp |= DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) WREG32(DC_HPD5_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) break;
		case RADEON_HPD_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) tmp = RREG32(DC_HPD6_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) tmp &= ~DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) tmp |= DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) WREG32(DC_HPD6_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) switch (hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) if (connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) void r600_hpd_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct drm_device *dev = rdev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) unsigned enable = 0;
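	/* bitmask of hpd pins whose interrupts will be enabled via
	 * radeon_irq_kms_enable_hpd() below */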
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct radeon_connector *radeon_connector = to_radeon_connector(connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on imacs; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (ASIC_IS_DCE3(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
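			/* program the hpd connection and rx interrupt timers;
			 * DCE 3.2 parts also carry the enable bit in this
			 * control register, so set it here too */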
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) if (ASIC_IS_DCE32(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) tmp |= DC_HPDx_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) switch (radeon_connector->hpd.hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) WREG32(DC_HPD1_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) WREG32(DC_HPD2_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) WREG32(DC_HPD3_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) case RADEON_HPD_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) WREG32(DC_HPD4_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) /* DCE 3.2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) case RADEON_HPD_5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) WREG32(DC_HPD5_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) case RADEON_HPD_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) WREG32(DC_HPD6_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) switch (radeon_connector->hpd.hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) enable |= 1 << radeon_connector->hpd.hpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) radeon_irq_kms_enable_hpd(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) void r600_hpd_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) struct drm_device *dev = rdev->ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) struct drm_connector *connector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) unsigned disable = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct radeon_connector *radeon_connector = to_radeon_connector(connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ASIC_IS_DCE3(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) switch (radeon_connector->hpd.hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) WREG32(DC_HPD1_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) WREG32(DC_HPD2_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) WREG32(DC_HPD3_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) case RADEON_HPD_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) WREG32(DC_HPD4_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) /* DCE 3.2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) case RADEON_HPD_5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) WREG32(DC_HPD5_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) case RADEON_HPD_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) WREG32(DC_HPD6_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) switch (radeon_connector->hpd.hpd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) case RADEON_HPD_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) case RADEON_HPD_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) case RADEON_HPD_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) disable |= 1 << radeon_connector->hpd.hpd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) radeon_irq_kms_disable_hpd(rdev, disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * R600 PCIE GART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) /* flush hdp cache so updates hit vram */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) !(rdev->flags & RADEON_IS_AGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) void __iomem *ptr = (void *)rdev->gart.ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
		/* r7xx hw bug: flush the hdp cache with a write to HDP_DEBUG1
		 * followed by an fb read rather than a write to
		 * HDP_REG_COHERENCY_FLUSH_CNTL.  This workaround seems to
		 * cause problems on some AGP cards, so just use the old
		 * method for them.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) WREG32(HDP_DEBUG1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) tmp = readl((void __iomem *)ptr);
	} else {
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) for (i = 0; i < rdev->usec_timeout; i++) {
		/* poll VM_CONTEXT0_REQUEST_RESPONSE for the invalidation result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
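		/* response type 2 means the flush request failed; any other
		 * non-zero response means it completed */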
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (tmp == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) pr_warn("[drm] r600 flush TLB failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) int r600_pcie_gart_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (rdev->gart.robj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) WARN(1, "R600 PCIE GART already initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* Initialize common gart structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) r = radeon_gart_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return r;
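	/* each GART entry is 8 bytes: one 64-bit PTE per GPU page */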
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return radeon_gart_table_vram_alloc(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static int r600_pcie_gart_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) int r, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) if (rdev->gart.robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) r = radeon_gart_table_vram_pin(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) /* Setup L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) EFFECTIVE_L2_QUEUE_SIZE(7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) WREG32(VM_L2_CNTL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* Setup TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) SYSTEM_ACCESS_MODE_NOT_IN_SYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ENABLE_WAIT_L2_QUERY;
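	/* mirror the same L1 TLB setup into each MC client's read/write
	 * control register (sys, hdp, gfx, pdma, uvd, sem) */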
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
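	/* page table start/end/base are programmed in 4K-page units,
	 * hence the >> 12 */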
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) (u32)(rdev->dummy_page.addr >> 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) for (i = 1; i < 7; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) r600_pcie_gart_tlb_flush(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) (unsigned)(rdev->mc.gtt_size >> 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) (unsigned long long)rdev->gart.table_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) rdev->gart.ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static void r600_pcie_gart_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /* Disable all tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) for (i = 0; i < 7; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) /* Disable L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) EFFECTIVE_L2_QUEUE_SIZE(7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* Setup L1 TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ENABLE_WAIT_L2_QUERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) radeon_gart_table_vram_unpin(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void r600_pcie_gart_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) radeon_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) r600_pcie_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) radeon_gart_table_vram_free(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) static void r600_agp_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Setup L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) EFFECTIVE_L2_QUEUE_SIZE(7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) WREG32(VM_L2_CNTL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) /* Setup TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) SYSTEM_ACCESS_MODE_NOT_IN_SYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ENABLE_WAIT_L2_QUERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) for (i = 0; i < 7; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) int r600_mc_wait_for_idle(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) for (i = 0; i < rdev->usec_timeout; i++) {
		/* check the MC busy bits in SRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) {
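	/* MC registers are accessed indirectly: write the register offset to
	 * MC_INDEX, then transfer the value through MC_DATA; mc_idx_lock
	 * keeps the index/data pair atomic against concurrent accessors. */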
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) uint32_t r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) spin_lock_irqsave(&rdev->mc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) r = RREG32(R_0028FC_MC_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) spin_lock_irqsave(&rdev->mc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) S_0028F8_MC_IND_WR_EN(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) WREG32(R_0028FC_MC_DATA, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) WREG32(R_0028F8_MC_INDEX, 0x7F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static void r600_mc_program(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) struct rv515_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* Initialize HDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) for (i = 0, j = 0; i < 32; i++, j += 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) WREG32((0x2c14 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) WREG32((0x2c18 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) WREG32((0x2c1c + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) WREG32((0x2c20 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) WREG32((0x2c24 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) rv515_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) }
	/* Lock out access through the VGA aperture (doesn't exist before R600) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* Update configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (rdev->mc.vram_start < rdev->mc.gtt_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /* VRAM before AGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) rdev->mc.vram_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) rdev->mc.gtt_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /* VRAM after AGP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) rdev->mc.gtt_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) rdev->mc.vram_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
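	/* MC_VM_FB_LOCATION packs the range in 16M (1 << 24) units:
	 * end in the upper 16 bits, start in the lower 16 */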
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) WREG32(MC_VM_FB_LOCATION, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) WREG32(HDP_NONSURFACE_INFO, (2 << 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) WREG32(MC_VM_AGP_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) rv515_vga_render_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same address in the GPU (MC) address space
 * as in the CPU (PCI) address space, as some GPUs seem to have issues
 * when VRAM is reprogrammed to a different address.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, place VRAM adjacent to the AGP aperture, as we
 * need the two ranges to be contiguous from the GPU's point of view so
 * that we can program the GPU to catch accesses outside of them (weird
 * GPU policy, see ??).
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling
 * this function on AGP platforms.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) u64 size_bf, size_af;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (mc->mc_vram_size > 0xE0000000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) /* leave room for at least 512M GTT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) dev_warn(rdev->dev, "limiting VRAM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) mc->real_vram_size = 0xE0000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) mc->mc_vram_size = 0xE0000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) size_bf = mc->gtt_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) size_af = mc->mc_mask - mc->gtt_end;
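		/*
		 * size_bf/size_af are the free space before and after the AGP
		 * aperture.  Hypothetical example: a 256M aperture at
		 * 0x40000000 with a 32-bit mc_mask gives size_bf = 1G and
		 * size_af ~= 2.75G, so VRAM would be placed after the
		 * aperture (the else branch below).
		 */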
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (size_bf > size_af) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) if (mc->mc_vram_size > size_bf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) dev_warn(rdev->dev, "limiting VRAM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) mc->real_vram_size = size_bf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) mc->mc_vram_size = size_bf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) mc->vram_start = mc->gtt_start - mc->mc_vram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (mc->mc_vram_size > size_af) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) dev_warn(rdev->dev, "limiting VRAM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) mc->real_vram_size = size_af;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) mc->mc_vram_size = size_af;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) mc->vram_start = mc->gtt_end + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) mc->mc_vram_size >> 20, mc->vram_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) mc->vram_end, mc->real_vram_size >> 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) u64 base = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) base <<= 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) radeon_vram_location(rdev, &rdev->mc, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) rdev->mc.gtt_base_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) radeon_gtt_location(rdev, mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) static int r600_mc_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int chansize, numchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) uint32_t h_addr, l_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) unsigned long long k8_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
	/* Get VRAM information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) rdev->mc.vram_is_ddr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) tmp = RREG32(RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (tmp & CHANSIZE_OVERRIDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) chansize = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) } else if (tmp & CHANSIZE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) chansize = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) chansize = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
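	/* NOOFCHAN in CHMAP encodes the number of memory channels:
	 * 0 -> 1, 1 -> 2, 2 -> 4, 3 -> 8 */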
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) tmp = RREG32(CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) numchan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) numchan = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) numchan = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) numchan = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) rdev->mc.vram_width = numchan * chansize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	/* Could the aperture size report 0? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /* Setup GPU memory space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) rdev->mc.visible_vram_size = rdev->mc.aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) r600_vram_gtt_location(rdev, &rdev->mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (rdev->flags & RADEON_IS_IGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) rs690_pm_info(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Use K8 direct mapping for fast fb access. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) rdev->fastfb_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 				/* FastFB is only meant for UMA memory. Here it is simply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 				 * disabled when sideport memory is present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 				if (!rdev->mc.igp_sideport_enabled && radeon_fastfb == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) (unsigned long long)rdev->mc.aper_base, k8_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) rdev->mc.aper_base = (resource_size_t)k8_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) rdev->fastfb_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) radeon_update_bandwidth_info(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)
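/**
 * r600_vram_scratch_init - allocate and map the VRAM scratch page
 *
 * @rdev: radeon_device pointer
 *
 * Allocate a single GPU page in VRAM (if not already allocated), then
 * reserve, pin and kernel-map it so that both the GPU and the CPU can
 * address it.
 * Returns 0 for success, error for failure.
 */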
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) int r600_vram_scratch_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (rdev->vram_scratch.robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 0, NULL, NULL, &rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (unlikely(r != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) r = radeon_bo_pin(rdev->vram_scratch.robj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) radeon_bo_unreserve(rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) r = radeon_bo_kmap(rdev->vram_scratch.robj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) (void **)&rdev->vram_scratch.ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) radeon_bo_unpin(rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) radeon_bo_unreserve(rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)
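/**
 * r600_vram_scratch_fini - tear down the VRAM scratch page
 *
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin and free the VRAM scratch buffer object, if it was
 * allocated.
 */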
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) void r600_vram_scratch_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) if (rdev->vram_scratch.robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (likely(r == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) radeon_bo_kunmap(rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) radeon_bo_unpin(rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) radeon_bo_unreserve(rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) radeon_bo_unref(&rdev->vram_scratch.robj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
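/**
 * r600_set_bios_scratch_engine_hung - flag the GUI engine hung state
 *
 * @rdev: radeon_device pointer
 * @hung: true if the engine is hung, false otherwise
 *
 * Set or clear the ASIC_GUI_ENGINE_HUNG bit in the BIOS 3 scratch
 * register so that the VBIOS knows the state of the GPU.
 */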
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (hung)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) WREG32(R600_BIOS_3_SCRATCH, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
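/**
 * r600_print_gpu_status_regs - dump the GPU status registers
 *
 * @rdev: radeon_device pointer
 *
 * Log the GRBM, SRBM, CP and DMA status registers to help in
 * debugging lockups and resets.
 */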
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static void r600_print_gpu_status_regs(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) RREG32(R_008010_GRBM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) RREG32(R_008014_GRBM_STATUS2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) RREG32(R_000E50_SRBM_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) RREG32(CP_STALLED_STAT1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) RREG32(CP_STALLED_STAT2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) RREG32(CP_BUSY_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) RREG32(CP_STAT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) RREG32(DMA_STATUS_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
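/**
 * r600_is_display_hung - check whether the display engine is hung
 *
 * @rdev: radeon_device pointer
 *
 * Sample the HV counter of every enabled CRTC, then poll the counters
 * up to ten times, 100us apart.  A CRTC whose counter never changes is
 * considered hung.
 * Returns true if any enabled CRTC appears hung, false otherwise.
 */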
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) static bool r600_is_display_hung(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) u32 crtc_hung = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) u32 crtc_status[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) u32 i, j, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) crtc_hung |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) for (j = 0; j < 10; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (crtc_hung & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (tmp != crtc_status[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) crtc_hung &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (crtc_hung == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
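/**
 * r600_gpu_check_soft_reset - determine which blocks need a soft reset
 *
 * @rdev: radeon_device pointer
 *
 * Inspect GRBM_STATUS, DMA_STATUS_REG and SRBM_STATUS and build a mask
 * of the blocks (GFX, CP, DMA, RLC, IH, SEM, GRBM, VMC, display) that
 * appear to be hung.  A busy MC is detected but masked out again at
 * the end, since it is most likely just busy rather than hung.
 * Returns the reset mask, or 0 if no reset is needed.
 */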
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) u32 reset_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /* GRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) tmp = RREG32(R_008010_GRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) if (rdev->family >= CHIP_RV770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) reset_mask |= RADEON_RESET_GFX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) reset_mask |= RADEON_RESET_GFX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) reset_mask |= RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (G_008010_GRBM_EE_BUSY(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* DMA_STATUS_REG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) tmp = RREG32(DMA_STATUS_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (!(tmp & DMA_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) reset_mask |= RADEON_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /* SRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) tmp = RREG32(R_000E50_SRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) reset_mask |= RADEON_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (G_000E50_IH_BUSY(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) reset_mask |= RADEON_RESET_IH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) if (G_000E50_SEM_BUSY(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) reset_mask |= RADEON_RESET_SEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (G_000E50_GRBM_RQ_PENDING(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) reset_mask |= RADEON_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (G_000E50_VMC_BUSY(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) reset_mask |= RADEON_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) G_000E50_MCDW_BUSY(tmp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) reset_mask |= RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (r600_is_display_hung(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) reset_mask |= RADEON_RESET_DISPLAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	/* Skip MC reset as it's most likely not hung, just busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (reset_mask & RADEON_RESET_MC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) reset_mask &= ~RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
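/**
 * r600_gpu_soft_reset - soft reset the requested GPU blocks
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of blocks to reset (RADEON_RESET_*)
 *
 * Halt the CP and RLC (and the DMA engine if requested), stop MC
 * access, then pulse the bits in GRBM_SOFT_RESET and SRBM_SOFT_RESET
 * that correspond to @reset_mask before resuming the MC.
 */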
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct rv515_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (reset_mask == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) r600_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) /* Disable CP parsing/prefetching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (rdev->family >= CHIP_RV770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /* disable the RLC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) WREG32(RLC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) if (reset_mask & RADEON_RESET_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) /* Disable DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) tmp = RREG32(DMA_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) WREG32(DMA_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) mdelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) rv515_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (r600_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (rdev->family >= CHIP_RV770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) S_008020_SOFT_RESET_CB(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) S_008020_SOFT_RESET_PA(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) S_008020_SOFT_RESET_SC(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) S_008020_SOFT_RESET_SPI(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) S_008020_SOFT_RESET_SX(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) S_008020_SOFT_RESET_SH(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) S_008020_SOFT_RESET_TC(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) S_008020_SOFT_RESET_TA(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) S_008020_SOFT_RESET_VC(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) S_008020_SOFT_RESET_VGT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) S_008020_SOFT_RESET_DB(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) S_008020_SOFT_RESET_CB(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) S_008020_SOFT_RESET_PA(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) S_008020_SOFT_RESET_SC(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) S_008020_SOFT_RESET_SMX(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) S_008020_SOFT_RESET_SPI(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) S_008020_SOFT_RESET_SX(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) S_008020_SOFT_RESET_SH(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) S_008020_SOFT_RESET_TC(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) S_008020_SOFT_RESET_TA(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) S_008020_SOFT_RESET_VC(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) S_008020_SOFT_RESET_VGT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (reset_mask & RADEON_RESET_CP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) S_008020_SOFT_RESET_VGT(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (reset_mask & RADEON_RESET_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (rdev->family >= CHIP_RV770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) srbm_soft_reset |= RV770_SOFT_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) srbm_soft_reset |= SOFT_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (reset_mask & RADEON_RESET_RLC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) if (reset_mask & RADEON_RESET_SEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (reset_mask & RADEON_RESET_IH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (reset_mask & RADEON_RESET_GRBM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (!(rdev->flags & RADEON_IS_IGP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) if (reset_mask & RADEON_RESET_MC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (reset_mask & RADEON_RESET_VMC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (grbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) tmp = RREG32(R_008020_GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) tmp |= grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) WREG32(R_008020_GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) tmp = RREG32(R_008020_GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) tmp &= ~grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) WREG32(R_008020_GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) tmp = RREG32(R_008020_GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (srbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) tmp |= srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) tmp &= ~srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) /* Wait a little for things to settle down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) rv515_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) r600_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
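/**
 * r600_gpu_pci_config_reset - reset the ASIC through PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Halt the CP, RLC and DMA engine, switch the clocks to bypass mode
 * (RV770+), disable bus mastering and MC access, then trigger the PCI
 * config reset and wait for CONFIG_MEMSIZE to become readable again as
 * a sign that the ASIC has come out of reset.
 */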
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) struct rv515_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) u32 tmp, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) dev_info(rdev->dev, "GPU pci config reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) /* disable dpm? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) /* Disable CP parsing/prefetching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) if (rdev->family >= CHIP_RV770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) /* disable the RLC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) WREG32(RLC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) /* Disable DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) tmp = RREG32(DMA_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) WREG32(DMA_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) mdelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /* set mclk/sclk to bypass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (rdev->family >= CHIP_RV770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) rv770_set_clk_bypass_mode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* disable BM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) pci_clear_master(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /* disable mem access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) rv515_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (r600_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) /* BIF reset workaround. Not sure if this is needed on 6xx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) tmp = RREG32(BUS_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) tmp |= VGA_COHE_SPEC_TIMER_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) WREG32(BUS_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) tmp = RREG32(BIF_SCRATCH0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) /* reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) radeon_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) /* BIF reset workaround. Not sure if this is needed on 6xx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) tmp = SOFT_RESET_BIF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) WREG32(SRBM_SOFT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) /* wait for asic to come out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
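/**
 * r600_asic_reset - reset the GPU
 *
 * @rdev: radeon_device pointer
 * @hard: go straight to a PCI config reset instead of a soft reset
 *
 * Try to soft reset only the blocks that appear to be hung, re-check,
 * and fall back to a PCI config reset if blocks are still hung and
 * radeon_hard_reset is enabled.  The BIOS "engine hung" scratch flag
 * is set while the GPU is hung and cleared once it recovers.
 * Returns 0.
 */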
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) int r600_asic_reset(struct radeon_device *rdev, bool hard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) u32 reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (hard) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) r600_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) reset_mask = r600_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) r600_set_bios_scratch_engine_hung(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) /* try soft reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) r600_gpu_soft_reset(rdev, reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) reset_mask = r600_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) /* try pci config reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (reset_mask && radeon_hard_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) r600_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) reset_mask = r600_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) if (!reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) r600_set_bios_scratch_engine_hung(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * r600_gfx_is_lockup - Check if the GFX engine is locked up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * @ring: radeon_ring structure holding ring information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * Check if the GFX engine is locked up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * Returns true if the engine appears to be locked up, false if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) u32 reset_mask = r600_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (!(reset_mask & (RADEON_RESET_GFX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) RADEON_RESET_COMPUTE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) RADEON_RESET_CP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) radeon_ring_lockup_update(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) return radeon_ring_test_lockup(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
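/**
 * r6xx_remap_render_backend - build the enabled render backend map
 *
 * @rdev: radeon_device pointer
 * @tiling_pipe_num: log2 encoded pipe count from the tiling config
 * @max_rb_num: maximum render backends on this asic
 * @total_max_rb_num: maximum render backends for the family
 * @disabled_rb_mask: mask of disabled render backends
 *
 * Distribute the rendering pipes evenly across the render backends
 * that are actually present, skipping any disabled RB.  Each backend
 * index takes 2 bits on r6xx/r7xx and 4 bits on evergreen+.
 * Returns the backend map value for the tiling config registers.
 */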
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) u32 r6xx_remap_render_backend(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) u32 tiling_pipe_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) u32 max_rb_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) u32 total_max_rb_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) u32 disabled_rb_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) u32 rendering_pipe_num, rb_num_width, req_rb_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) u32 pipe_rb_ratio, pipe_rb_remain, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) u32 data = 0, mask = 1 << (max_rb_num - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) unsigned i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /* mask out the RBs that don't exist on that asic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /* make sure at least one RB is available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if ((tmp & 0xff) != 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) disabled_rb_mask = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) rendering_pipe_num = 1 << tiling_pipe_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) BUG_ON(rendering_pipe_num < req_rb_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) pipe_rb_ratio = rendering_pipe_num / req_rb_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (rdev->family <= CHIP_RV740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) /* r6xx/r7xx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) rb_num_width = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /* eg+ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) rb_num_width = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) for (i = 0; i < max_rb_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (!(mask & disabled_rb_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) for (j = 0; j < pipe_rb_ratio; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) data <<= rb_num_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) data |= max_rb_num - i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (pipe_rb_remain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) data <<= rb_num_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) data |= max_rb_num - i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) pipe_rb_remain--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) mask >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) return data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
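/**
 * r600_count_pipe_bits - count the enabled pipes/backends in a mask
 *
 * @val: pipe/backend bitmask
 *
 * Returns the number of bits set in @val.
 */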
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) int r600_count_pipe_bits(uint32_t val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) return hweight32(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
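/**
 * r600_gpu_init - set up the 3D engine
 *
 * @rdev: radeon_device pointer
 *
 * Program the static GPU configuration: the per-family limits, the
 * tiling configuration derived from RAMCFG, the render backend map,
 * and the default CP, SX, DB and SQ state.
 */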
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) static void r600_gpu_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) u32 tiling_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) u32 ramcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) u32 cc_gc_shader_pipe_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) u32 sq_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) u32 sq_gpr_resource_mgmt_1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) u32 sq_gpr_resource_mgmt_2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) u32 sq_thread_resource_mgmt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) u32 sq_stack_resource_mgmt_1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) u32 sq_stack_resource_mgmt_2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) u32 disabled_rb_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) rdev->config.r600.tiling_group_size = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) case CHIP_R600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) rdev->config.r600.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) rdev->config.r600.max_tile_pipes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) rdev->config.r600.max_simds = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) rdev->config.r600.max_backends = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) rdev->config.r600.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) rdev->config.r600.max_threads = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) rdev->config.r600.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) rdev->config.r600.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) rdev->config.r600.max_gs_threads = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) rdev->config.r600.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) rdev->config.r600.sx_max_export_pos_size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) rdev->config.r600.sx_max_export_smx_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) rdev->config.r600.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) case CHIP_RV630:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) case CHIP_RV635:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) rdev->config.r600.max_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) rdev->config.r600.max_tile_pipes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) rdev->config.r600.max_simds = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) rdev->config.r600.max_backends = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) rdev->config.r600.max_gprs = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) rdev->config.r600.max_threads = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) rdev->config.r600.max_stack_entries = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) rdev->config.r600.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) rdev->config.r600.max_gs_threads = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) rdev->config.r600.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) rdev->config.r600.sx_max_export_pos_size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) rdev->config.r600.sx_max_export_smx_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) rdev->config.r600.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) case CHIP_RV610:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) case CHIP_RV620:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) case CHIP_RS780:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) case CHIP_RS880:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) rdev->config.r600.max_pipes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) rdev->config.r600.max_tile_pipes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) rdev->config.r600.max_simds = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) rdev->config.r600.max_backends = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) rdev->config.r600.max_gprs = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) rdev->config.r600.max_threads = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) rdev->config.r600.max_stack_entries = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) rdev->config.r600.max_hw_contexts = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) rdev->config.r600.max_gs_threads = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) rdev->config.r600.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) rdev->config.r600.sx_max_export_pos_size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) rdev->config.r600.sx_max_export_smx_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) rdev->config.r600.sq_num_cf_insts = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) case CHIP_RV670:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) rdev->config.r600.max_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) rdev->config.r600.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) rdev->config.r600.max_simds = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) rdev->config.r600.max_backends = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) rdev->config.r600.max_gprs = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) rdev->config.r600.max_threads = 192;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) rdev->config.r600.max_stack_entries = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) rdev->config.r600.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) rdev->config.r600.max_gs_threads = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) rdev->config.r600.sx_max_export_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) rdev->config.r600.sx_max_export_pos_size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) rdev->config.r600.sx_max_export_smx_size = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) rdev->config.r600.sq_num_cf_insts = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* Initialize HDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) for (i = 0, j = 0; i < 32; i++, j += 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) WREG32((0x2c14 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) WREG32((0x2c18 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) WREG32((0x2c1c + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) WREG32((0x2c20 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) WREG32((0x2c24 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /* Setup tiling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) tiling_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) ramcfg = RREG32(RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) switch (rdev->config.r600.max_tile_pipes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) tiling_config |= PIPE_TILING(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) tiling_config |= PIPE_TILING(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) tiling_config |= PIPE_TILING(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) tiling_config |= PIPE_TILING(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (tmp > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) tiling_config |= ROW_TILING(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) tiling_config |= SAMPLE_SPLIT(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) tiling_config |= ROW_TILING(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) tiling_config |= SAMPLE_SPLIT(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) tiling_config |= BANK_SWAPS(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) tmp = rdev->config.r600.max_simds -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) rdev->config.r600.active_simds = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) for (i = 0; i < rdev->config.r600.max_backends; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) tmp |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) /* if all the backends are disabled, fix it up here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if ((disabled_rb_mask & tmp) == tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) for (i = 0; i < rdev->config.r600.max_backends; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) disabled_rb_mask &= ~(1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) R6XX_MAX_BACKENDS, disabled_rb_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) tiling_config |= tmp << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) rdev->config.r600.backend_map = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) rdev->config.r600.tile_config = tiling_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) WREG32(GB_TILING_CONFIG, tiling_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) /* Setup some CP states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) SYNC_WALKER | SYNC_ALIGNER));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) /* Setup various GPU states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (rdev->family == CHIP_RV670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) WREG32(ARB_GDEC_RD_CNTL, 0x00000021);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) tmp = RREG32(SX_DEBUG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) tmp |= SMX_EVENT_RELEASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	if (rdev->family > CHIP_R600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) tmp |= ENABLE_NEW_SMX_ADDRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) WREG32(SX_DEBUG_1, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) if (((rdev->family) == CHIP_R600) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) ((rdev->family) == CHIP_RV630) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) ((rdev->family) == CHIP_RV610) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) ((rdev->family) == CHIP_RV620) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) ((rdev->family) == CHIP_RS780) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ((rdev->family) == CHIP_RS880)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) WREG32(DB_DEBUG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) WREG32(VGT_NUM_INSTANCES, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) tmp = RREG32(SQ_MS_FIFO_SIZES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (((rdev->family) == CHIP_RV610) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) ((rdev->family) == CHIP_RV620) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) ((rdev->family) == CHIP_RS780) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) ((rdev->family) == CHIP_RS880)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) tmp = (CACHE_FIFO_SIZE(0xa) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) FETCH_FIFO_HIWATER(0xa) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) DONE_FIFO_HIWATER(0xe0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) ALU_UPDATE_FIFO_HIWATER(0x8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) } else if (((rdev->family) == CHIP_R600) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) ((rdev->family) == CHIP_RV630)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) tmp &= ~DONE_FIFO_HIWATER(0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) tmp |= DONE_FIFO_HIWATER(0x4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) WREG32(SQ_MS_FIFO_SIZES, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) * should be adjusted as needed by the 2D/3D drivers. This just sets default values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) sq_config = RREG32(SQ_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) sq_config &= ~(PS_PRIO(3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) VS_PRIO(3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) GS_PRIO(3) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) ES_PRIO(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) sq_config |= (DX9_CONSTS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) VC_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) PS_PRIO(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) VS_PRIO(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) GS_PRIO(2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) ES_PRIO(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if ((rdev->family) == CHIP_R600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) NUM_VS_GPRS(124) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) NUM_CLAUSE_TEMP_GPRS(4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) NUM_ES_GPRS(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) NUM_VS_THREADS(48) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) NUM_GS_THREADS(4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) NUM_ES_THREADS(4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) NUM_VS_STACK_ENTRIES(128));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) NUM_ES_STACK_ENTRIES(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) } else if (((rdev->family) == CHIP_RV610) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) ((rdev->family) == CHIP_RV620) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) ((rdev->family) == CHIP_RS780) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) ((rdev->family) == CHIP_RS880)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) /* no vertex cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) sq_config &= ~VC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) NUM_VS_GPRS(44) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) NUM_CLAUSE_TEMP_GPRS(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) NUM_ES_GPRS(17));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) NUM_VS_THREADS(78) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) NUM_GS_THREADS(4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) NUM_ES_THREADS(31));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) NUM_VS_STACK_ENTRIES(40));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) NUM_ES_STACK_ENTRIES(16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) } else if (((rdev->family) == CHIP_RV630) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) ((rdev->family) == CHIP_RV635)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) NUM_VS_GPRS(44) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) NUM_CLAUSE_TEMP_GPRS(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) NUM_ES_GPRS(18));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) NUM_VS_THREADS(78) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) NUM_GS_THREADS(4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) NUM_ES_THREADS(31));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) NUM_VS_STACK_ENTRIES(40));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) NUM_ES_STACK_ENTRIES(16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) } else if ((rdev->family) == CHIP_RV670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) NUM_VS_GPRS(44) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) NUM_CLAUSE_TEMP_GPRS(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) NUM_ES_GPRS(17));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) NUM_VS_THREADS(78) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) NUM_GS_THREADS(4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) NUM_ES_THREADS(31));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) NUM_VS_STACK_ENTRIES(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) NUM_ES_STACK_ENTRIES(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) WREG32(SQ_CONFIG, sq_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (((rdev->family) == CHIP_RV610) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) ((rdev->family) == CHIP_RV620) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) ((rdev->family) == CHIP_RS780) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) ((rdev->family) == CHIP_RS880)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) /* More default values. 2D/3D driver should adjust as needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) S1_X(0x4) | S1_Y(0xc)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) S1_X(0x2) | S1_Y(0x2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) S2_X(0xa) | S2_Y(0x6) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) S3_X(0x6) | S3_Y(0xa)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) S1_X(0x4) | S1_Y(0xc) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) S2_X(0x1) | S2_Y(0x6) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) S3_X(0xa) | S3_Y(0xe)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) S5_X(0x0) | S5_Y(0x0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) S6_X(0xb) | S6_Y(0x4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) S7_X(0x7) | S7_Y(0x8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) WREG32(VGT_STRMOUT_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) tmp = rdev->config.r600.max_pipes * 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) case CHIP_RV610:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) case CHIP_RV620:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) case CHIP_RS780:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) case CHIP_RS880:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) tmp += 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) case CHIP_RV670:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) tmp += 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) if (tmp > 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) tmp = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) WREG32(VGT_ES_PER_GS, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) WREG32(VGT_GS_PER_ES, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) WREG32(VGT_GS_PER_VS, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) WREG32(VGT_GS_VERTEX_REUSE, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) /* more default values. 2D/3D driver should adjust as needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) WREG32(VGT_STRMOUT_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) WREG32(SX_MISC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) WREG32(PA_SC_MODE_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) WREG32(PA_SC_AA_CONFIG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) WREG32(PA_SC_LINE_STIPPLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) WREG32(SPI_INPUT_Z, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) WREG32(CB_COLOR7_FRAG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) /* Clear render buffer base addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) WREG32(CB_COLOR0_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) WREG32(CB_COLOR1_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) WREG32(CB_COLOR2_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) WREG32(CB_COLOR3_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) WREG32(CB_COLOR4_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) WREG32(CB_COLOR5_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) WREG32(CB_COLOR6_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) WREG32(CB_COLOR7_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) WREG32(CB_COLOR7_FRAG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) case CHIP_RV610:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) case CHIP_RV620:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) case CHIP_RS780:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) case CHIP_RS880:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) tmp = TC_L2_SIZE(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) case CHIP_RV630:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) case CHIP_RV635:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) tmp = TC_L2_SIZE(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) case CHIP_R600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) tmp = TC_L2_SIZE(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) WREG32(TC_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) tmp = RREG32(HDP_HOST_PATH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) WREG32(HDP_HOST_PATH_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) tmp = RREG32(ARB_POP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) tmp |= ENABLE_TC128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) WREG32(ARB_POP, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) NUM_CLIP_SEQ(3)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) WREG32(VC_ENHANCE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * Indirect registers accessor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) u32 r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) (void)RREG32(PCIE_PORT_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) r = RREG32(PCIE_PORT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) (void)RREG32(PCIE_PORT_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) WREG32(PCIE_PORT_DATA, (v));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) (void)RREG32(PCIE_PORT_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) * CP & Ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) void r600_cp_stop(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) WREG32(SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) int r600_init_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) const char *chip_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) const char *rlc_chip_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) const char *smc_chip_name = "RV770";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) char fw_name[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) DRM_DEBUG("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) case CHIP_R600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) rlc_chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) case CHIP_RV610:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) chip_name = "RV610";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) rlc_chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) case CHIP_RV630:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) chip_name = "RV630";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) rlc_chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) case CHIP_RV620:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) chip_name = "RV620";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) rlc_chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) case CHIP_RV635:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) chip_name = "RV635";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) rlc_chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) case CHIP_RV670:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) chip_name = "RV670";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) rlc_chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) case CHIP_RS780:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) case CHIP_RS880:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) chip_name = "RS780";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) rlc_chip_name = "R600";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) case CHIP_RV770:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) chip_name = "RV770";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) rlc_chip_name = "R700";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) smc_chip_name = "RV770";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) case CHIP_RV730:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) chip_name = "RV730";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) rlc_chip_name = "R700";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) smc_chip_name = "RV730";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) case CHIP_RV710:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) chip_name = "RV710";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) rlc_chip_name = "R700";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) smc_chip_name = "RV710";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) case CHIP_RV740:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) chip_name = "RV730";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) rlc_chip_name = "R700";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) smc_chip_name = "RV740";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) case CHIP_CEDAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) chip_name = "CEDAR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) rlc_chip_name = "CEDAR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) smc_chip_name = "CEDAR";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) case CHIP_REDWOOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) chip_name = "REDWOOD";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) rlc_chip_name = "REDWOOD";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) smc_chip_name = "REDWOOD";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) case CHIP_JUNIPER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) chip_name = "JUNIPER";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) rlc_chip_name = "JUNIPER";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) smc_chip_name = "JUNIPER";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) case CHIP_CYPRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) case CHIP_HEMLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) chip_name = "CYPRESS";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) rlc_chip_name = "CYPRESS";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) smc_chip_name = "CYPRESS";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) case CHIP_PALM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) chip_name = "PALM";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) rlc_chip_name = "SUMO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) case CHIP_SUMO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) chip_name = "SUMO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) rlc_chip_name = "SUMO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) case CHIP_SUMO2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) chip_name = "SUMO2";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) rlc_chip_name = "SUMO";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) default: BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) if (rdev->family >= CHIP_CEDAR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) } else if (rdev->family >= CHIP_RV770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) pfp_req_size = R700_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) me_req_size = R700_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) rlc_req_size = R700_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) pfp_req_size = R600_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) me_req_size = R600_PM4_UCODE_SIZE * 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) rlc_req_size = R600_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) DRM_INFO("Loading %s Microcode\n", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (rdev->pfp_fw->size != pfp_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) rdev->pfp_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) if (rdev->me_fw->size != me_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) rdev->me_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) if (rdev->rlc_fw->size != rlc_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) pr_err("r600_rlc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) rdev->rlc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) pr_err("smc: error loading firmware \"%s\"\n", fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) release_firmware(rdev->smc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) rdev->smc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) } else if (rdev->smc_fw->size != smc_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) pr_err("smc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) rdev->smc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) if (err != -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) pr_err("r600_cp: Failed to load firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) release_firmware(rdev->pfp_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) rdev->pfp_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) release_firmware(rdev->me_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) rdev->me_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) release_firmware(rdev->rlc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) rdev->rlc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) release_firmware(rdev->smc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) rdev->smc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) u32 r600_gfx_get_rptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) u32 rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) rptr = rdev->wb.wb[ring->rptr_offs/4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) rptr = RREG32(R600_CP_RB_RPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) u32 r600_gfx_get_wptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) return RREG32(R600_CP_RB_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) void r600_gfx_set_wptr(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) WREG32(R600_CP_RB_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) (void)RREG32(R600_CP_RB_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) static int r600_cp_load_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (!rdev->me_fw || !rdev->pfp_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) r600_cp_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) WREG32(CP_RB_CNTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) BUF_SWAP_32BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) /* Reset cp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) WREG32(GRBM_SOFT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) fw_data = (const __be32 *)rdev->me_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) WREG32(CP_ME_RAM_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) fw_data = (const __be32 *)rdev->pfp_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) WREG32(CP_PFP_UCODE_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
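	/* reset the read/write addresses so both engines start fetching from dword 0 */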
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) WREG32(CP_ME_RAM_RADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) int r600_cp_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) uint32_t cp_me;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) r = radeon_ring_lock(rdev, ring, 7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) radeon_ring_write(ring, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (rdev->family >= CHIP_RV770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) radeon_ring_write(ring, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) radeon_ring_write(ring, 0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) cp_me = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) WREG32(R_0086D8_CP_ME_CNTL, cp_me);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) int r600_cp_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) u32 rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) /* Reset cp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) WREG32(GRBM_SOFT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) /* Set ring buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) rb_bufsz = order_base_2(ring->ring_size / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) tmp |= BUF_SWAP_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) WREG32(CP_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) WREG32(CP_SEM_WAIT_TIMER, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) /* Set the write pointer delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) WREG32(CP_RB_WPTR_DELAY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) /* Initialize the ring buffer's read and write pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) WREG32(CP_RB_RPTR_WR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) ring->wptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) WREG32(CP_RB_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) /* set the wb address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) WREG32(CP_RB_RPTR_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) WREG32(SCRATCH_UMSK, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) tmp |= RB_NO_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) WREG32(SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) WREG32(CP_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) r600_cp_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ring->ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) ring->ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) u32 rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) /* Align ring size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) rb_bufsz = order_base_2(ring_size / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) ring_size = (1 << (rb_bufsz + 1)) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) ring->ring_size = ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) ring->align_mask = 16 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) if (radeon_ring_supports_scratch_reg(rdev, ring)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) ring->rptr_save_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) void r600_cp_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) r600_cp_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) radeon_ring_fini(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) radeon_scratch_free(rdev, ring->rptr_save_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) * GPU scratch registers helpers function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) void r600_scratch_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) rdev->scratch.num_reg = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) rdev->scratch.reg_base = SCRATCH_REG0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) for (i = 0; i < rdev->scratch.num_reg; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) rdev->scratch.free[i] = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) uint32_t scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) uint32_t tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) r = radeon_scratch_get(rdev, &scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) WREG32(scratch, 0xCAFEDEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) r = radeon_ring_lock(rdev, ring, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) radeon_scratch_free(rdev, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) radeon_ring_write(ring, 0xDEADBEEF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) tmp = RREG32(scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (tmp == 0xDEADBEEF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (i < rdev->usec_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) ring->idx, scratch, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) radeon_scratch_free(rdev, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) * CP fences/semaphores
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) void r600_fence_ring_emit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) struct radeon_fence *fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) struct radeon_ring *ring = &rdev->ring[fence->ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) PACKET3_SH_ACTION_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) if (rdev->family >= CHIP_RV770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) if (rdev->wb.use_event) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) /* flush read cache over gart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) radeon_ring_write(ring, cp_coher_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) radeon_ring_write(ring, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) radeon_ring_write(ring, 10); /* poll interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) /* EVENT_WRITE_EOP - flush caches, send int */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) radeon_ring_write(ring, lower_32_bits(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) radeon_ring_write(ring, fence->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /* flush read cache over gart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) radeon_ring_write(ring, cp_coher_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) radeon_ring_write(ring, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) radeon_ring_write(ring, 10); /* poll interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) /* wait for 3D idle clean */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) /* Emit fence sequence & fire IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) radeon_ring_write(ring, fence->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) radeon_ring_write(ring, RB_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) * r600_semaphore_ring_emit - emit a semaphore on the CP ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) * @ring: radeon ring buffer object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) * @semaphore: radeon semaphore object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) * @emit_wait: Is this a sempahore wait?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) * from running ahead of semaphore waits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) bool r600_semaphore_ring_emit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) struct radeon_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) struct radeon_semaphore *semaphore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) bool emit_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) uint64_t addr = semaphore->gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
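	/* pre-cayman hardware needs to wait on the signal event itself rather than polling the semaphore value */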
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) if (rdev->family < CHIP_CAYMAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) radeon_ring_write(ring, lower_32_bits(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) /* Prevent the PFP from running ahead of the semaphore wait */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) radeon_ring_write(ring, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) * r600_copy_cpdma - copy pages using the CP DMA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) * @src_offset: src GPU address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) * @dst_offset: dst GPU address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) * @num_gpu_pages: number of GPU pages to xfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) * @fence: radeon fence object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) * Copy GPU paging using the CP DMA engine (r6xx+).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) * Used by the radeon ttm implementation to move pages if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) * registered as the asic copy callback.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) uint64_t src_offset, uint64_t dst_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) unsigned num_gpu_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) struct dma_resv *resv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) struct radeon_fence *fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) struct radeon_sync sync;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) int ring_index = rdev->asic->copy.blit_ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) struct radeon_ring *ring = &rdev->ring[ring_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) u32 size_in_bytes, cur_size_in_bytes, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) int i, num_loops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) int r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) radeon_sync_create(&sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) DRM_ERROR("radeon: moving bo (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) radeon_sync_free(rdev, &sync, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) return ERR_PTR(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) radeon_sync_resv(rdev, &sync, resv, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) radeon_sync_rings(rdev, &sync, ring->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) radeon_ring_write(ring, WAIT_3D_IDLE_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) for (i = 0; i < num_loops; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) cur_size_in_bytes = size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) if (cur_size_in_bytes > 0x1fffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) cur_size_in_bytes = 0x1fffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) size_in_bytes -= cur_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) tmp = upper_32_bits(src_offset) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (size_in_bytes == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) tmp |= PACKET3_CP_DMA_CP_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) radeon_ring_write(ring, lower_32_bits(src_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) radeon_ring_write(ring, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) radeon_ring_write(ring, lower_32_bits(dst_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) radeon_ring_write(ring, cur_size_in_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) src_offset += cur_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) dst_offset += cur_size_in_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) r = radeon_fence_emit(rdev, &fence, ring->idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) radeon_ring_unlock_undo(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) radeon_sync_free(rdev, &sync, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) return ERR_PTR(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) radeon_sync_free(rdev, &sync, fence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) return fence;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) int r600_set_surface_reg(struct radeon_device *rdev, int reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) uint32_t tiling_flags, uint32_t pitch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) uint32_t offset, uint32_t obj_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) /* FIXME: implement */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) /* FIXME: implement */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
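/**
 * r600_uvd_init - init the UVD block and ring
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the UVD block and set up the UVD ring. If UVD
 * initialization fails, UVD support is disabled for this device
 * instead of failing driver init as a whole.
 */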
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) static void r600_uvd_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) r = radeon_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) if (r) {
		dev_err(rdev->dev, "UVD init failed (%d).\n", r);
		/*
		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
		 * uvd_v1_0_resume() fail early, so nothing would happen
		 * there. Going through that code path is pointless, which
		 * is why we disable UVD here.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) rdev->has_uvd = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
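/**
 * r600_uvd_start - resume UVD and start its fence driver
 *
 * @rdev: radeon_device pointer
 *
 * Resume the UVD block and start fence processing on the UVD ring.
 * On failure the UVD ring size is cleared so that r600_uvd_resume()
 * skips the ring.
 */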
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) static void r600_uvd_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) r = uvd_v1_0_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
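/**
 * r600_uvd_resume - bring up the UVD ring
 *
 * @rdev: radeon_device pointer
 *
 * Initialize and start the UVD ring, provided UVD is present and
 * r600_uvd_start() succeeded in setting it up.
 */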
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) static void r600_uvd_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) r = uvd_v1_0_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
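/**
 * r600_startup - program the asic and start the rings
 *
 * @rdev: radeon_device pointer
 *
 * Called at init and resume time: programs the MC and GART (or AGP),
 * sets up write-back and interrupts, loads the CP microcode, and
 * brings up the GFX and UVD rings, the IB pool and audio.
 * Returns 0 on success, negative error code on failure.
 */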
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) static int r600_startup(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) /* enable pcie gen2 link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) r600_pcie_gen2_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) /* scratch needs to be initialized before MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) r = r600_vram_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) r600_mc_program(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) r600_agp_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) r = r600_pcie_gart_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) r600_gpu_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) /* allocate wb buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) r = radeon_wb_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) r600_uvd_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) /* Enable IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) r = radeon_irq_kms_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) r = r600_irq_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) DRM_ERROR("radeon: IH init failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) r600_irq_set(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) RADEON_CP_PACKET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) r = r600_cp_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) r = r600_cp_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) r600_uvd_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) r = radeon_ib_pool_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) r = radeon_audio_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) DRM_ERROR("radeon: audio init failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188)
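/**
 * r600_vga_set_state - enable/disable legacy VGA access
 *
 * @rdev: radeon_device pointer
 * @state: true to enable VGA access, false to disable it
 *
 * Toggles the VGA enable bits in CONFIG_CNTL.
 */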
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) void r600_vga_set_state(struct radeon_device *rdev, bool state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) uint32_t temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) temp = RREG32(CONFIG_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (!state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) temp &= ~(1<<0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) temp |= (1<<1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) temp &= ~(1<<1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) WREG32(CONFIG_CNTL, temp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
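/**
 * r600_resume - resume the asic after suspend
 *
 * @rdev: radeon_device pointer
 *
 * Posts the card via the AtomBIOS init table, resumes power
 * management when DPM is in use, and re-runs r600_startup().
 * Returns 0 on success, negative error code on failure.
 */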
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) int r600_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
	/* Do not reset the GPU before posting; on r600 hardware, unlike
	 * r500, posting performs the tasks needed to bring the GPU back
	 * into good shape.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) /* post card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (rdev->pm.pm_method == PM_METHOD_DPM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) radeon_pm_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) r = r600_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) DRM_ERROR("r600 startup failed on resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
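/**
 * r600_suspend - shut the asic down for suspend
 *
 * @rdev: radeon_device pointer
 *
 * Suspends power management and audio, stops the CP and UVD, then
 * disables interrupts, write-back and the GART. Always returns 0.
 */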
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) int r600_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) radeon_pm_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) radeon_audio_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) r600_cp_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) radeon_uvd_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) r600_irq_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) radeon_wb_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) r600_pcie_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
/* The plan is to move initialization into this function and to
 * use helper functions so that radeon_device_init() does pretty
 * much nothing more than call the asic-specific functions. This
 * should also allow us to remove a bunch of callbacks such as
 * vram_info.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) int r600_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) /* Read BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) if (!radeon_get_bios(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) if (ASIC_IS_AVIVO(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) /* Must be an ATOMBIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) if (!rdev->is_atom_bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) r = radeon_atombios_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) /* Post card if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (!radeon_card_posted(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) if (!rdev->bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) }
		DRM_INFO("GPU not posted. Posting now...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) /* Initialize scratch registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) r600_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) /* Initialize surface registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) radeon_surface_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) /* Initialize clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) radeon_get_clock_info(rdev->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) /* Fence driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) r = radeon_fence_driver_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) if (rdev->flags & RADEON_IS_AGP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) r = radeon_agp_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) radeon_agp_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) r = r600_mc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) /* Memory manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) r = radeon_bo_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) r = r600_init_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) DRM_ERROR("Failed to load firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) /* Initialize power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) radeon_pm_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) r600_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) rdev->ih.ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) r600_ih_ring_init(rdev, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) r = r600_pcie_gart_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) r = r600_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) dev_err(rdev->dev, "disabling GPU acceleration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) r600_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) r600_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) r600_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
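/**
 * r600_fini - tear down the asic
 *
 * @rdev: radeon_device pointer
 *
 * Tears down everything set up by r600_init() in roughly the
 * reverse order and frees the cached BIOS copy.
 */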
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) void r600_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) radeon_pm_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) radeon_audio_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) r600_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) r600_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) radeon_uvd_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) r600_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) r600_vram_scratch_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) radeon_agp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) radeon_gem_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) radeon_fence_driver_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) radeon_bo_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) radeon_atombios_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) kfree(rdev->bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) rdev->bios = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) * CS stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) */
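/**
 * r600_ring_ib_execute - emit an IB on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to schedule
 *
 * Records the next expected rptr (via the rptr save register, or a
 * MEM_WRITE when write-back is enabled) and then emits an
 * INDIRECT_BUFFER packet pointing at the IB.
 */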
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) struct radeon_ring *ring = &rdev->ring[ib->ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) u32 next_rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (ring->rptr_save_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) next_rptr = ring->wptr + 3 + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) radeon_ring_write(ring, ((ring->rptr_save_reg -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) radeon_ring_write(ring, next_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) } else if (rdev->wb.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) next_rptr = ring->wptr + 5 + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) radeon_ring_write(ring, next_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) radeon_ring_write(ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) (2 << 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) (ib->gpu_addr & 0xFFFFFFFC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) radeon_ring_write(ring, ib->length_dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398)
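/**
 * r600_ib_test - basic IB submission sanity test
 *
 * @rdev: radeon_device pointer
 * @ring: ring the IB is submitted on
 *
 * Schedules a small IB that writes 0xDEADBEEF to a scratch register
 * and polls until the value lands. Returns 0 on success, negative
 * error code on failure or timeout.
 */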
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) struct radeon_ib ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) uint32_t scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) uint32_t tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) r = radeon_scratch_get(rdev, &scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) WREG32(scratch, 0xCAFEDEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) DRM_ERROR("radeon: failed to get ib (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) goto free_scratch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) ib.ptr[2] = 0xDEADBEEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) ib.length_dw = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) r = radeon_ib_schedule(rdev, &ib, NULL, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) goto free_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) RADEON_USEC_IB_TEST_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) if (r < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) DRM_ERROR("radeon: fence wait failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) goto free_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) } else if (r == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) DRM_ERROR("radeon: fence wait timed out.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) r = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) goto free_ib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) r = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) tmp = RREG32(scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) if (tmp == 0xDEADBEEF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) if (i < rdev->usec_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) scratch, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) r = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) free_ib:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) radeon_ib_free(rdev, &ib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) free_scratch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) radeon_scratch_free(rdev, scratch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) * Interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) *
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
 * the same as the CP ring buffer, but in reverse: rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes. As the host irq handler processes interrupts, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) * increments the rptr. When the rptr catches up with the wptr, all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) * current interrupts have been processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) */
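
/*
 * A minimal sketch of that consumption loop, assuming the 16-byte IH
 * vectors used on r6xx (the real handler, r600_irq_process(), also
 * copes with write-back and ring overflow):
 *
 *	wptr = RREG32(IH_RB_WPTR);
 *	while (rptr != wptr) {
 *		src_id = le32_to_cpu(rdev->ih.ring[rptr / 4]) & 0xff;
 *		... dispatch on src_id ...
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 */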
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468)
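/**
 * r600_ih_ring_init - size the IH ring
 *
 * @rdev: radeon_device pointer
 * @ring_size: requested ring size in bytes
 *
 * Rounds the requested size up to a power of two and initializes
 * the software side of the IH ring state.
 */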
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) u32 rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) /* Align ring size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) rb_bufsz = order_base_2(ring_size / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) ring_size = (1 << rb_bufsz) * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) rdev->ih.ring_size = ring_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) rdev->ih.rptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480)
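/**
 * r600_ih_ring_alloc - allocate, pin and map the IH ring buffer
 *
 * @rdev: radeon_device pointer
 *
 * Creates the IH ring buffer object in GTT, pins it and maps it into
 * kernel address space. Returns 0 on success (or if the buffer
 * already exists), negative error code on failure.
 */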
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) int r600_ih_ring_alloc(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) /* Allocate ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) if (rdev->ih.ring_obj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) r = radeon_bo_create(rdev, rdev->ih.ring_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) PAGE_SIZE, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) RADEON_GEM_DOMAIN_GTT, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) NULL, NULL, &rdev->ih.ring_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) r = radeon_bo_reserve(rdev->ih.ring_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (unlikely(r != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) r = radeon_bo_pin(rdev->ih.ring_obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) RADEON_GEM_DOMAIN_GTT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) &rdev->ih.gpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) radeon_bo_unreserve(rdev->ih.ring_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) r = radeon_bo_kmap(rdev->ih.ring_obj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) (void **)&rdev->ih.ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) radeon_bo_unreserve(rdev->ih.ring_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
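/**
 * r600_ih_ring_fini - unmap, unpin and free the IH ring buffer
 *
 * @rdev: radeon_device pointer
 */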
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) void r600_ih_ring_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) if (rdev->ih.ring_obj) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) r = radeon_bo_reserve(rdev->ih.ring_obj, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) if (likely(r == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) radeon_bo_kunmap(rdev->ih.ring_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) radeon_bo_unpin(rdev->ih.ring_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) radeon_bo_unreserve(rdev->ih.ring_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) radeon_bo_unref(&rdev->ih.ring_obj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) rdev->ih.ring = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) rdev->ih.ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
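/**
 * r600_rlc_stop - halt the RLC
 *
 * @rdev: radeon_device pointer
 *
 * Disables the RLC; r7xx asics additionally need an SRBM soft reset
 * of the RLC before it is halted.
 */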
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) void r600_rlc_stop(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) if ((rdev->family >= CHIP_RV770) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) (rdev->family <= CHIP_RV740)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) /* r7xx asics need to soft reset RLC before halting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) WREG32(SRBM_SOFT_RESET, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) WREG32(RLC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) static void r600_rlc_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) WREG32(RLC_CNTL, RLC_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
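/**
 * r600_rlc_resume - load the RLC microcode and start the RLC
 *
 * @rdev: radeon_device pointer
 *
 * Halts the RLC, clears its state registers, uploads the RLC
 * microcode and re-enables the RLC. Returns 0 on success, -EINVAL
 * if no RLC firmware has been loaded.
 */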
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) static int r600_rlc_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) if (!rdev->rlc_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) r600_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) WREG32(RLC_HB_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) WREG32(RLC_HB_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) WREG32(RLC_HB_RPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) WREG32(RLC_HB_WPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) WREG32(RLC_MC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) WREG32(RLC_UCODE_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) fw_data = (const __be32 *)rdev->rlc_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) if (rdev->family >= CHIP_RV770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) WREG32(RLC_UCODE_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) WREG32(RLC_UCODE_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) WREG32(RLC_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) r600_rlc_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) static void r600_enable_interrupts(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) u32 ih_cntl = RREG32(IH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) ih_cntl |= ENABLE_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) ih_rb_cntl |= IH_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) WREG32(IH_CNTL, ih_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) WREG32(IH_RB_CNTL, ih_rb_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) rdev->ih.enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) void r600_disable_interrupts(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) u32 ih_cntl = RREG32(IH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) ih_rb_cntl &= ~IH_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) ih_cntl &= ~ENABLE_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) WREG32(IH_RB_CNTL, ih_rb_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) WREG32(IH_CNTL, ih_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) /* set rptr, wptr to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) WREG32(IH_RB_RPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) WREG32(IH_RB_WPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) rdev->ih.enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) rdev->ih.rptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
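/**
 * r600_disable_interrupt_state - force interrupt enables to a quiet state
 *
 * @rdev: radeon_device pointer
 *
 * Resets the CP, DMA, display, hotplug and HDMI interrupt enables to
 * their quiescent defaults so no stale sources fire while interrupts
 * are being (re)configured.
 */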
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) static void r600_disable_interrupt_state(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) WREG32(DMA_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) WREG32(GRBM_INT_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) WREG32(DxMODE_INT_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) if (ASIC_IS_DCE3(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) WREG32(DC_HPD1_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) WREG32(DC_HPD2_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) WREG32(DC_HPD3_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) WREG32(DC_HPD4_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) if (ASIC_IS_DCE32(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) WREG32(DC_HPD5_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) WREG32(DC_HPD6_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673)
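/**
 * r600_irq_init - set up the IH and enable interrupts
 *
 * @rdev: radeon_device pointer
 *
 * Allocates the IH ring, brings up the RLC, programs the IH ring
 * registers and write-back address, masks all interrupt sources and
 * then enables the IH. Returns 0 on success, negative error code on
 * failure.
 */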
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) int r600_irq_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) int rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) /* allocate ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) ret = r600_ih_ring_alloc(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) /* disable irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) r600_disable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) /* init rlc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) if (rdev->family >= CHIP_CEDAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) ret = evergreen_rlc_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) ret = r600_rlc_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) r600_ih_ring_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) /* setup interrupt control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) /* set dummy read address to dummy page address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) interrupt_cntl = RREG32(INTERRUPT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) WREG32(INTERRUPT_CNTL, interrupt_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) IH_WPTR_OVERFLOW_CLEAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) (rb_bufsz << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) /* set the writeback address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) WREG32(IH_RB_CNTL, ih_rb_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) /* set rptr, wptr to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) WREG32(IH_RB_RPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) WREG32(IH_RB_WPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) /* Default settings for IH_CNTL (disabled at first) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) /* RPTR_REARM only works if msi's are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) if (rdev->msi_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) ih_cntl |= RPTR_REARM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) WREG32(IH_CNTL, ih_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) /* force the active interrupt state to all disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) if (rdev->family >= CHIP_CEDAR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) evergreen_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) r600_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) /* at this point everything should be setup correctly to enable master */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) pci_set_master(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) /* enable irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) r600_enable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) void r600_irq_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) r600_irq_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) r600_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) void r600_irq_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) r600_irq_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) r600_ih_ring_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)
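/**
 * r600_irq_set - program the interrupt enables from driver state
 *
 * @rdev: radeon_device pointer
 *
 * Builds the CP, DMA, vblank, hotplug, HDMI and thermal interrupt
 * enable masks from rdev->irq and writes them to the hardware.
 * Returns 0 on success, -EINVAL if no interrupt handler is installed.
 */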
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) int r600_irq_set(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) u32 mode_int = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) u32 grbm_int_cntl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) u32 hdmi0, hdmi1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) u32 dma_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) u32 thermal_int = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) /* don't enable anything if the ih is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) if (!rdev->ih.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) r600_disable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) /* force the active interrupt state to all disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) r600_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) if (ASIC_IS_DCE3(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) if (ASIC_IS_DCE32(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807)
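	/* The control registers above were read with their enable bits
	 * masked off; the blocks below OR the requested enable bits back
	 * in per source, and everything is committed to the hardware in
	 * one batch of writes at the end of the function.
	 */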
	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	/* posting read */
	RREG32(R_000E50_SRBM_STATUS);

	return 0;
}

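/*
 * r600_irq_ack - latch and acknowledge display interrupt sources
 *
 * Snapshots the display interrupt status registers into
 * rdev->irq.stat_regs.r600, then writes the ack bits for any pending
 * pflip, vblank/vline, hotplug and HDMI audio events so the sources
 * can fire again.  Used by r600_irq_process() and r600_irq_disable().
 */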
static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}

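/*
 * r600_irq_disable - turn off all interrupt sources
 *
 * Masks interrupt generation, waits briefly for anything in flight,
 * acknowledges whatever is still pending and forces the per-source
 * enable state to disabled.
 */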
void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

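/*
 * r600_get_ih_wptr - fetch the current IH ring write pointer
 *
 * Reads the write pointer from the CPU-visible write-back page when
 * write-back is enabled, falling back to an MMIO register read
 * otherwise, and resynchronizes the read pointer if the ring has
 * overflowed.
 */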
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happens, start parsing
		 * interrupts from the last not-overwritten vector (wptr
		 * + 16). Hopefully this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
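
/*
 * Worked example based on the table above: a 16-byte entry whose first
 * dword is 0x00000001 and whose second dword is 0x00000000 decodes to
 * src_id = 1, src_data = 0, i.e. a D1 vblank event; the remaining two
 * dwords are reserved and ignored by the loop below.
 */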

int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[0]) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[0]))
					radeon_crtc_handle_vblank(rdev, 0);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D1 vblank\n");

				break;
			case 1: /* D1 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D1 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");

				if (rdev->irq.crtc_vblank_int[1]) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
				}
				if (atomic_read(&rdev->irq.pflip[1]))
					radeon_crtc_handle_vblank(rdev, 1);
				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
				DRM_DEBUG("IH: D2 vblank\n");

				break;
			case 1: /* D2 vline */
				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
				DRM_DEBUG("IH: D2 vline\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 9: /* D1 pflip */
			DRM_DEBUG("IH: D1 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 0);
			break;
		case 11: /* D2 pflip */
			DRM_DEBUG("IH: D2 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 1);
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD1\n");
				break;
			case 1:
				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD2\n");
				break;
			case 4:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD3\n");
				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD4\n");
				break;
			case 10:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD5\n");
				break;
			case 12:
				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
				queue_hotplug = true;
				DRM_DEBUG("IH: HPD6\n");

				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI0\n");

				break;
			case 5:
				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");

				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
				queue_hdmi = true;
				DRM_DEBUG("IH: HDMI1\n");

				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	if (queue_hotplug)
		schedule_delayed_work(&rdev->hotplug_work, 0);
	if (queue_hdmi)
		schedule_work(&rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		schedule_work(&rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
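
/*
 * Note: r600_irq_process() is not called directly; it is reached
 * through the asic table's irq.process hook from the driver's
 * interrupt handler, which forwards the IRQ_HANDLED/IRQ_NONE result
 * computed above to the IRQ core.
 */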

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
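
/*
 * With CONFIG_DEBUG_FS set, the entry registered above is exposed by
 * the drm debugfs layer (typically as r600_mc_info under the device's
 * dri debugfs directory) and dumps SRBM_STATUS and VM_L2_STATUS via
 * r600_debugfs_mc_info().
 */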

/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 * @rdev: radeon device structure
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug: a write to HDP_DEBUG1 followed by an fb read is
	 * used rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards, so just use
	 * the old method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

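/*
 * r600_set_pcie_lanes - request a new PCIE link width
 *
 * Maps the requested lane count (0/1/2/4/8/12/16) onto the link width
 * control encoding and triggers a link reconfiguration.  IGP, non-PCIE
 * and X2 boards are left untouched.
 */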
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}

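/*
 * r600_get_pcie_lanes - report the negotiated PCIE link width
 *
 * Translates the width read back from the link width control register
 * into a lane count; returns 0 on boards where the query does not
 * apply (IGP, non-PCIE, X2).
 */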
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

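/*
 * r600_pcie_gen2_enable - switch the PCIE link to gen 2 speeds
 *
 * Does nothing when disabled via the radeon.pcie_gen2 module parameter,
 * on IGP/non-PCIE/X2 boards, on R600 and older, or when the upstream
 * bus cannot do 5.0/8.0 GT/s.  Otherwise advertises upconfig support on
 * the 55 nm r6xx parts and retrains the link at the gen 2 rate.
 */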
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) static void r600_pcie_gen2_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) u16 link_cntl2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) if (radeon_pcie_gen2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) if (!(rdev->flags & RADEON_IS_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) /* x2 cards have a special sequence */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) if (ASIC_IS_X2(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) /* only RV6xx+ chips are supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) if (rdev->family <= CHIP_R600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) if ((rdev->pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) (rdev->pdev->bus->max_bus_speed != PCIE_SPEED_8_0GT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) if (speed_cntl & LC_CURRENT_DATA_RATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) DRM_INFO("PCIE gen 2 link speeds already enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) /* 55 nm r6xx asics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) if ((rdev->family == CHIP_RV670) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) (rdev->family == CHIP_RV620) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) (rdev->family == CHIP_RV635)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) /* advertise upconfig capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) link_width_cntl &= ~LC_UPCONFIGURE_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) LC_RECONFIG_ARC_MISSING_ESCAPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) link_width_cntl |= LC_UPCONFIGURE_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) /* 55 nm r6xx asics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) if ((rdev->family == CHIP_RV670) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) (rdev->family == CHIP_RV620) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) (rdev->family == CHIP_RV635)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) WREG32(MM_CFGREGS_CNTL, 0x8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) link_cntl2 = RREG32(0x4088);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) WREG32(MM_CFGREGS_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) /* not supported yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) if (link_cntl2 & SELECTABLE_DEEMPHASIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)
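		/* set the target link speed to gen2 through PCIe config
		 * space: MM_CFGREGS_CNTL temporarily exposes the config
		 * registers for MMIO access, and offset 0x4088 holds the
		 * Link Control 2 register (hence link_cntl2)
		 */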
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) tmp = RREG32(0x541c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) WREG32(0x541c, tmp | 0x8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) link_cntl2 = RREG16(0x4088);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) link_cntl2 |= 0x2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) WREG16(0x4088, link_cntl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) WREG32(MM_CFGREGS_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577)
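		/* 55 nm parts clear LC_POINT_7_PLUS_EN; everything else
		 * drops the target link speed override instead
		 */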
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) if ((rdev->family == CHIP_RV670) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) (rdev->family == CHIP_RV620) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) (rdev->family == CHIP_RV635)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) training_cntl &= ~LC_POINT_7_PLUS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
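		/* finally, set the gen2 strap so the link retrains at gen2 */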
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) speed_cntl |= LC_GEN2_EN_STRAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable upconfigure if the gen1 bridge vendor
		 * is 0x111d or 0x1106; for now, disable it unconditionally
		 */
		link_width_cntl |= LC_UPCONFIGURE_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) }
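
/*
 * Illustrative sketch, not part of the upstream driver: the gen2 sequence
 * above repeats the same read-modify-write pattern on the PCIE port
 * registers. A hypothetical helper along these lines could express that
 * pattern; the name r600_pcie_port_rmw is invented here for illustration.
 */
static u32 __maybe_unused r600_pcie_port_rmw(struct radeon_device *rdev,
					     u32 reg, u32 clear, u32 set)
{
	u32 tmp = RREG32_PCIE_PORT(reg);	/* read the current value */

	tmp &= ~clear;		/* drop the bits being reprogrammed */
	tmp |= set;		/* apply the new field values */
	WREG32_PCIE_PORT(reg, tmp);	/* write the result back */
	return tmp;
}

/*
 * With such a helper, the LC_GEN2_EN_STRAP update above would read:
 *	r600_pcie_port_rmw(rdev, PCIE_LC_SPEED_CNTL, 0, LC_GEN2_EN_STRAP);
 */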
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) * r600_get_gpu_clock_counter - return GPU clock counter snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64-bit clock counter snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) uint64_t clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) mutex_lock(&rdev->gpu_clock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) mutex_unlock(&rdev->gpu_clock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) return clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) }
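
/*
 * Illustrative sketch, not part of the upstream driver: because the counter
 * read above is a monotonically increasing 64-bit value, the GPU clocks
 * consumed by a piece of work can be derived from two snapshots. The helper
 * name and the callback shape are invented here for illustration.
 */
static u64 __maybe_unused
r600_gpu_clocks_elapsed(struct radeon_device *rdev,
			void (*workload)(struct radeon_device *))
{
	u64 start = r600_get_gpu_clock_counter(rdev);

	workload(rdev);		/* caller-supplied work to be timed */
	return r600_get_gpu_clock_counter(rdev) - start;
}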