^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright 2011 Advanced Micro Devices, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Permission is hereby granted, free of charge, to any person obtaining a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * copy of this software and associated documentation files (the "Software"),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * to deal in the Software without restriction, including without limitation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * the rights to use, copy, modify, merge, publish, distribute, sublicense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * and/or sell copies of the Software, and to permit persons to whom the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Software is furnished to do so, subject to the following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * The above copyright notice and this permission notice shall be included in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * all copies or substantial portions of the Software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * OTHER DEALINGS IN THE SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Authors: Alex Deucher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <drm/drm_vblank.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <drm/radeon_drm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include "atom.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include "clearstate_si.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include "radeon.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include "radeon_asic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include "radeon_audio.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include "radeon_ucode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "si_blit_shaders.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "sid.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) MODULE_FIRMWARE("radeon/TAHITI_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) MODULE_FIRMWARE("radeon/tahiti_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) MODULE_FIRMWARE("radeon/tahiti_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) MODULE_FIRMWARE("radeon/tahiti_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) MODULE_FIRMWARE("radeon/tahiti_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) MODULE_FIRMWARE("radeon/pitcairn_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) MODULE_FIRMWARE("radeon/VERDE_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) MODULE_FIRMWARE("radeon/VERDE_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) MODULE_FIRMWARE("radeon/VERDE_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) MODULE_FIRMWARE("radeon/VERDE_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) MODULE_FIRMWARE("radeon/verde_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) MODULE_FIRMWARE("radeon/verde_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) MODULE_FIRMWARE("radeon/verde_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) MODULE_FIRMWARE("radeon/verde_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) MODULE_FIRMWARE("radeon/verde_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) MODULE_FIRMWARE("radeon/verde_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) MODULE_FIRMWARE("radeon/verde_k_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) MODULE_FIRMWARE("radeon/OLAND_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) MODULE_FIRMWARE("radeon/OLAND_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) MODULE_FIRMWARE("radeon/OLAND_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) MODULE_FIRMWARE("radeon/OLAND_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) MODULE_FIRMWARE("radeon/oland_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) MODULE_FIRMWARE("radeon/oland_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) MODULE_FIRMWARE("radeon/oland_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) MODULE_FIRMWARE("radeon/oland_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) MODULE_FIRMWARE("radeon/oland_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) MODULE_FIRMWARE("radeon/oland_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) MODULE_FIRMWARE("radeon/oland_k_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) MODULE_FIRMWARE("radeon/HAINAN_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) MODULE_FIRMWARE("radeon/hainan_pfp.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) MODULE_FIRMWARE("radeon/hainan_me.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) MODULE_FIRMWARE("radeon/hainan_ce.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) MODULE_FIRMWARE("radeon/hainan_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) MODULE_FIRMWARE("radeon/hainan_rlc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) MODULE_FIRMWARE("radeon/hainan_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) MODULE_FIRMWARE("radeon/si58_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) static void si_pcie_gen3_enable(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) static void si_program_aspm(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) extern void sumo_rlc_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) extern int sumo_rlc_init(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) extern int r600_ih_ring_alloc(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) extern void r600_ih_ring_fini(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) extern bool evergreen_is_display_hung(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) bool enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) static void si_init_pg(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static void si_init_cg(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) static void si_fini_pg(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) static void si_fini_cg(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static void si_rlc_stop(struct radeon_device *rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) static const u32 crtc_offsets[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) EVERGREEN_CRTC0_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) EVERGREEN_CRTC1_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) EVERGREEN_CRTC2_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) EVERGREEN_CRTC3_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) EVERGREEN_CRTC4_REGISTER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) EVERGREEN_CRTC5_REGISTER_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static const u32 si_disp_int_status[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) DISP_INTERRUPT_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) DISP_INTERRUPT_STATUS_CONTINUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) DISP_INTERRUPT_STATUS_CONTINUE2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) DISP_INTERRUPT_STATUS_CONTINUE3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) DISP_INTERRUPT_STATUS_CONTINUE4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) DISP_INTERRUPT_STATUS_CONTINUE5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #define DC_HPDx_CONTROL(x) (DC_HPD1_CONTROL + (x * 0xc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) #define DC_HPDx_INT_CONTROL(x) (DC_HPD1_INT_CONTROL + (x * 0xc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define DC_HPDx_INT_STATUS_REG(x) (DC_HPD1_INT_STATUS + (x * 0xc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) static const u32 verde_rlc_save_restore_register_list[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) (0x8000 << 16) | (0x98f4 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) (0x8040 << 16) | (0x98f4 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) (0x8000 << 16) | (0xe80 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) (0x8040 << 16) | (0xe80 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) (0x8000 << 16) | (0x89bc >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) (0x8040 << 16) | (0x89bc >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) (0x8000 << 16) | (0x8c1c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) (0x8040 << 16) | (0x8c1c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) (0x9c00 << 16) | (0x98f0 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) (0x9c00 << 16) | (0xe7c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) (0x8000 << 16) | (0x9148 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) (0x8040 << 16) | (0x9148 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) (0x9c00 << 16) | (0x9150 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) (0x9c00 << 16) | (0x897c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) (0x9c00 << 16) | (0x8d8c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) (0x9c00 << 16) | (0xac54 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 0X00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 0x3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) (0x9c00 << 16) | (0x98f8 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) (0x9c00 << 16) | (0x9910 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) (0x9c00 << 16) | (0x9914 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) (0x9c00 << 16) | (0x9918 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) (0x9c00 << 16) | (0x991c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) (0x9c00 << 16) | (0x9920 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) (0x9c00 << 16) | (0x9924 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) (0x9c00 << 16) | (0x9928 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) (0x9c00 << 16) | (0x992c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) (0x9c00 << 16) | (0x9930 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) (0x9c00 << 16) | (0x9934 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) (0x9c00 << 16) | (0x9938 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) (0x9c00 << 16) | (0x993c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) (0x9c00 << 16) | (0x9940 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) (0x9c00 << 16) | (0x9944 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) (0x9c00 << 16) | (0x9948 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) (0x9c00 << 16) | (0x994c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) (0x9c00 << 16) | (0x9950 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) (0x9c00 << 16) | (0x9954 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) (0x9c00 << 16) | (0x9958 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) (0x9c00 << 16) | (0x995c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) (0x9c00 << 16) | (0x9960 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) (0x9c00 << 16) | (0x9964 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) (0x9c00 << 16) | (0x9968 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) (0x9c00 << 16) | (0x996c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) (0x9c00 << 16) | (0x9970 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) (0x9c00 << 16) | (0x9974 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) (0x9c00 << 16) | (0x9978 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) (0x9c00 << 16) | (0x997c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) (0x9c00 << 16) | (0x9980 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) (0x9c00 << 16) | (0x9984 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) (0x9c00 << 16) | (0x9988 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) (0x9c00 << 16) | (0x998c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) (0x9c00 << 16) | (0x8c00 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) (0x9c00 << 16) | (0x8c14 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) (0x9c00 << 16) | (0x8c04 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) (0x9c00 << 16) | (0x8c08 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) (0x8000 << 16) | (0x9b7c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) (0x8040 << 16) | (0x9b7c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) (0x8000 << 16) | (0xe84 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) (0x8040 << 16) | (0xe84 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) (0x8000 << 16) | (0x89c0 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) (0x8040 << 16) | (0x89c0 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) (0x8000 << 16) | (0x914c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) (0x8040 << 16) | (0x914c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) (0x8000 << 16) | (0x8c20 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) (0x8040 << 16) | (0x8c20 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) (0x8000 << 16) | (0x9354 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) (0x8040 << 16) | (0x9354 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) (0x9c00 << 16) | (0x9060 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) (0x9c00 << 16) | (0x9364 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) (0x9c00 << 16) | (0x9100 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) (0x9c00 << 16) | (0x913c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) (0x8000 << 16) | (0x90e0 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) (0x8000 << 16) | (0x90e4 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) (0x8000 << 16) | (0x90e8 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) (0x8040 << 16) | (0x90e0 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) (0x8040 << 16) | (0x90e4 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) (0x8040 << 16) | (0x90e8 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) (0x9c00 << 16) | (0x8bcc >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) (0x9c00 << 16) | (0x8b24 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) (0x9c00 << 16) | (0x88c4 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) (0x9c00 << 16) | (0x8e50 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) (0x9c00 << 16) | (0x8c0c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) (0x9c00 << 16) | (0x8e58 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) (0x9c00 << 16) | (0x8e5c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) (0x9c00 << 16) | (0x9508 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) (0x9c00 << 16) | (0x950c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) (0x9c00 << 16) | (0x9494 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) (0x9c00 << 16) | (0xac0c >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) (0x9c00 << 16) | (0xac10 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) (0x9c00 << 16) | (0xac14 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) (0x9c00 << 16) | (0xae00 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) (0x9c00 << 16) | (0xac08 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) (0x9c00 << 16) | (0x88d4 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) (0x9c00 << 16) | (0x88c8 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) (0x9c00 << 16) | (0x88cc >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) (0x9c00 << 16) | (0x89b0 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) (0x9c00 << 16) | (0x8b10 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) (0x9c00 << 16) | (0x8a14 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) (0x9c00 << 16) | (0x9830 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) (0x9c00 << 16) | (0x9834 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) (0x9c00 << 16) | (0x9838 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) (0x9c00 << 16) | (0x9a10 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) (0x8000 << 16) | (0x9870 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) (0x8000 << 16) | (0x9874 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) (0x8001 << 16) | (0x9870 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) (0x8001 << 16) | (0x9874 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) (0x8040 << 16) | (0x9870 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) (0x8040 << 16) | (0x9874 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) (0x8041 << 16) | (0x9870 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) (0x8041 << 16) | (0x9874 >> 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) static const u32 tahiti_golden_rlc_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 0xc424, 0xffffffff, 0x00601005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 0xc47c, 0xffffffff, 0x10104040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 0xc488, 0xffffffff, 0x0100000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 0xc314, 0xffffffff, 0x00000800,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 0xc30c, 0xffffffff, 0x800000f4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 0xf4a8, 0xffffffff, 0x00000000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) static const u32 tahiti_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 0x9a10, 0x00010000, 0x00018208,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 0x9830, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 0x9834, 0xf00fffff, 0x00000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 0x9838, 0x0002021c, 0x00020200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 0xc78, 0x00000080, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 0xd030, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 0xd830, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 0x5bb0, 0x000000f0, 0x00000070,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 0x5bc0, 0x00200000, 0x50100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 0x7030, 0x31000311, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 0x277c, 0x00000003, 0x000007ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 0x240c, 0x000007ff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) 0x8b24, 0xffffffff, 0x00ffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 0x28a4c, 0x07ffffff, 0x4e000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 0x28350, 0x3f3f3fff, 0x2a00126a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 0x30, 0x000000ff, 0x0040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 0x34, 0x00000040, 0x00004040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 0x9100, 0x07ffffff, 0x03000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 0x8e88, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 0x8e84, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 0x9060, 0x0000007f, 0x00000020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 0x9508, 0x00010000, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 0xac14, 0x00000200, 0x000002fb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 0xac10, 0xffffffff, 0x0000543b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 0xac0c, 0xffffffff, 0xa9210876,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 0x88d0, 0xffffffff, 0x000fff40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 0x1410, 0x20000000, 0x20fffed8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 0x15c0, 0x000c0fc0, 0x000c0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) static const u32 tahiti_golden_registers2[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 0xc64, 0x00000001, 0x00000001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) static const u32 pitcairn_golden_rlc_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 0xc424, 0xffffffff, 0x00601004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 0xc47c, 0xffffffff, 0x10102020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 0xc488, 0xffffffff, 0x01000020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 0xc314, 0xffffffff, 0x00000800,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 0xc30c, 0xffffffff, 0x800000a4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) static const u32 pitcairn_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 0x9a10, 0x00010000, 0x00018208,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 0x9830, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 0x9834, 0xf00fffff, 0x00000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 0x9838, 0x0002021c, 0x00020200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 0xc78, 0x00000080, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 0xd030, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 0xd830, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 0x5bb0, 0x000000f0, 0x00000070,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 0x5bc0, 0x00200000, 0x50100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 0x7030, 0x31000311, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 0x2ae4, 0x00073ffe, 0x000022a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 0x240c, 0x000007ff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 0x8b24, 0xffffffff, 0x00ffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 0x28a4c, 0x07ffffff, 0x4e000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 0x28350, 0x3f3f3fff, 0x2a00126a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 0x30, 0x000000ff, 0x0040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 0x34, 0x00000040, 0x00004040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 0x9100, 0x07ffffff, 0x03000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 0x9060, 0x0000007f, 0x00000020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 0x9508, 0x00010000, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 0xac14, 0x000003ff, 0x000000f7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 0xac10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 0xac0c, 0xffffffff, 0x32761054,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 0x15c0, 0x000c0fc0, 0x000c0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) static const u32 verde_golden_rlc_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 0xc424, 0xffffffff, 0x033f1005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 0xc47c, 0xffffffff, 0x10808020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 0xc488, 0xffffffff, 0x00800008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 0xc314, 0xffffffff, 0x00001000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 0xc30c, 0xffffffff, 0x80010014
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static const u32 verde_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 0x9a10, 0x00010000, 0x00018208,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 0x9830, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 0x9834, 0xf00fffff, 0x00000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 0x9838, 0x0002021c, 0x00020200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 0xc78, 0x00000080, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 0xd030, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 0xd030, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 0xd830, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 0xd830, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 0x5bb0, 0x000000f0, 0x00000070,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 0x5bc0, 0x00200000, 0x50100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 0x7030, 0x31000311, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 0x2ae4, 0x00073ffe, 0x000022a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 0x2ae4, 0x00073ffe, 0x000022a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 0x2ae4, 0x00073ffe, 0x000022a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 0x240c, 0x000007ff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 0x240c, 0x000007ff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 0x240c, 0x000007ff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 0x8b24, 0xffffffff, 0x00ffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 0x28a4c, 0x07ffffff, 0x4e000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 0x28350, 0x3f3f3fff, 0x0000124a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 0x28350, 0x3f3f3fff, 0x0000124a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 0x28350, 0x3f3f3fff, 0x0000124a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 0x30, 0x000000ff, 0x0040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 0x34, 0x00000040, 0x00004040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 0x9100, 0x07ffffff, 0x03000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 0x9100, 0x07ffffff, 0x03000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 0x8e88, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 0x8e88, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 0x8e88, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 0x8e84, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 0x8e84, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 0x8e84, 0x01ff1f3f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 0x9060, 0x0000007f, 0x00000020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 0x9508, 0x00010000, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 0xac14, 0x000003ff, 0x00000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 0xac14, 0x000003ff, 0x00000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 0xac14, 0x000003ff, 0x00000003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 0xac10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 0xac10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 0xac10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 0xac0c, 0xffffffff, 0x00001032,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 0xac0c, 0xffffffff, 0x00001032,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 0xac0c, 0xffffffff, 0x00001032,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 0x15c0, 0x000c0fc0, 0x000c0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) static const u32 oland_golden_rlc_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 0xc424, 0xffffffff, 0x00601005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 0xc47c, 0xffffffff, 0x10104040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 0xc488, 0xffffffff, 0x0100000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 0xc314, 0xffffffff, 0x00000800,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 0xc30c, 0xffffffff, 0x800000f4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) static const u32 oland_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 0x9a10, 0x00010000, 0x00018208,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 0x9830, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 0x9834, 0xf00fffff, 0x00000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 0x9838, 0x0002021c, 0x00020200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 0xc78, 0x00000080, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 0xd030, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 0xd830, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 0x5bb0, 0x000000f0, 0x00000070,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 0x5bc0, 0x00200000, 0x50100000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 0x7030, 0x31000311, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 0x2ae4, 0x00073ffe, 0x000022a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 0x240c, 0x000007ff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 0x8b24, 0xffffffff, 0x00ffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 0x28a4c, 0x07ffffff, 0x4e000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 0x28350, 0x3f3f3fff, 0x00000082,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 0x30, 0x000000ff, 0x0040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 0x34, 0x00000040, 0x00004040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 0x9100, 0x07ffffff, 0x03000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 0x9060, 0x0000007f, 0x00000020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 0x9508, 0x00010000, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 0xac14, 0x000003ff, 0x000000f3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 0xac10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 0xac0c, 0xffffffff, 0x00003210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 0x15c0, 0x000c0fc0, 0x000c0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) static const u32 hainan_golden_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 0x9a10, 0x00010000, 0x00018208,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 0x9830, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 0x9834, 0xf00fffff, 0x00000400,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 0x9838, 0x0002021c, 0x00020200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 0xd0c0, 0xff000fff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 0xd030, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 0xd8c0, 0xff000fff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 0xd830, 0x000300c0, 0x00800040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 0x2ae4, 0x00073ffe, 0x000022a2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 0x240c, 0x000007ff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 0x8a14, 0xf000001f, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 0x8b24, 0xffffffff, 0x00ffffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 0x8b10, 0x0000ff0f, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 0x28a4c, 0x07ffffff, 0x4e000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 0x28350, 0x3f3f3fff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 0x30, 0x000000ff, 0x0040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 0x34, 0x00000040, 0x00004040,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 0x9100, 0x03e00000, 0x03600000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 0x9060, 0x0000007f, 0x00000020,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 0x9508, 0x00010000, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 0xac14, 0x000003ff, 0x000000f1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 0xac10, 0xffffffff, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 0xac0c, 0xffffffff, 0x00003210,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 0x88d4, 0x0000001f, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 0x15c0, 0x000c0fc0, 0x000c0400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) static const u32 hainan_golden_registers2[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 0x98f8, 0xffffffff, 0x02010001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) static const u32 tahiti_mgcg_cgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 0xc400, 0xffffffff, 0xfffffffc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 0x92a4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 0x9774, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 0x8984, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 0x92a0, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 0xc380, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 0x8d88, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 0x8d8c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 0xad80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 0xac54, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 0xaf04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 0xae04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 0x9160, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 0x9164, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 0x9168, 0xffffffff, 0x00040007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 0x916c, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 0x9170, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 0x9174, 0xffffffff, 0x00020001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 0x9178, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 0x917c, 0xffffffff, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 0x9180, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 0x9184, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 0x9188, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 0x918c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 0x9190, 0xffffffff, 0x00000008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 0x9194, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 0x9198, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 0x919c, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 0x91a0, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 0x91a4, 0xffffffff, 0x00000009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 0x91a8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 0x91ac, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 0x91b0, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 0x91b4, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 0x91b8, 0xffffffff, 0x0008000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 0x91bc, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 0x91c0, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 0x91c4, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 0x91c8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 0x91cc, 0xffffffff, 0x0000000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 0x91d0, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 0x91d4, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 0x91d8, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 0x91dc, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 0x91e0, 0xffffffff, 0x0000000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 0x91e4, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 0x91e8, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 0x91ec, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 0x91f0, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 0x91f4, 0xffffffff, 0x0000000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 0x91f8, 0xffffffff, 0x000c000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 0x91fc, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 0x9200, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 0x9204, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 0x9208, 0xffffffff, 0x000c000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 0x920c, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 0x9210, 0xffffffff, 0x00110010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 0x9214, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 0x9218, 0xffffffff, 0x000c000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 0x921c, 0xffffffff, 0x0000000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 0x9220, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 0x9224, 0xffffffff, 0x00110010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 0x9228, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 0x922c, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 0x9230, 0xffffffff, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 0x9234, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 0x9238, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 0x923c, 0xffffffff, 0x000c000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 0x9240, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 0x9244, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 0x9248, 0xffffffff, 0x0010000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 0x924c, 0xffffffff, 0x00130012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 0x9250, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 0x9254, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 0x9258, 0xffffffff, 0x00100013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 0x925c, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 0x9260, 0xffffffff, 0x00150014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 0x9264, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 0x9268, 0xffffffff, 0x0010000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 0x926c, 0xffffffff, 0x00000013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 0x9270, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 0x9274, 0xffffffff, 0x00150014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 0x9278, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 0x927c, 0xffffffff, 0x00110010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 0x9280, 0xffffffff, 0x00000014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 0x9284, 0xffffffff, 0x00130012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 0x9288, 0xffffffff, 0x00160015,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 0x928c, 0xffffffff, 0x0010000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 0x9290, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 0x9294, 0xffffffff, 0x00000015,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 0x9298, 0xffffffff, 0x00140013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 0x929c, 0xffffffff, 0x00170016,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 0x9150, 0xffffffff, 0x96940200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 0x8708, 0xffffffff, 0x00900100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 0xc478, 0xffffffff, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 0xc404, 0xffffffff, 0x0020003f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 0x30, 0xffffffff, 0x0000001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 0x34, 0x000f0000, 0x000f0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 0x1024, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 0x102c, 0x00000101, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 0x20a8, 0xffffffff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 0x264c, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 0x2648, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 0x55e4, 0xff000fff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 0x55e8, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 0x2f50, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 0x30cc, 0xc0000fff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 0xc1e4, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 0xd0c0, 0xfffffff0, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 0xd8c0, 0xfffffff0, 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) static const u32 pitcairn_mgcg_cgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 0xc400, 0xffffffff, 0xfffffffc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 0x92a4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 0x9774, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 0x8984, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 0x92a0, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 0xc380, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 0x8d88, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 0x8d8c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 0xad80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 0xac54, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 0xaf04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 0xae04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 0x9160, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 0x9164, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 0x9168, 0xffffffff, 0x00040007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 0x916c, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 0x9170, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 0x9174, 0xffffffff, 0x00020001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 0x9178, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 0x917c, 0xffffffff, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 0x9180, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 0x9184, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 0x9188, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 0x918c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 0x9190, 0xffffffff, 0x00000008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 0x9194, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 0x9198, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 0x919c, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 0x91a0, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 0x91a4, 0xffffffff, 0x00000009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 0x91a8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 0x91ac, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 0x91b0, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 0x91b4, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 0x91b8, 0xffffffff, 0x0008000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 0x91bc, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 0x91c0, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 0x9200, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 0x9204, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 0x9208, 0xffffffff, 0x000c000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 0x920c, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 0x9210, 0xffffffff, 0x00110010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 0x9214, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 0x9218, 0xffffffff, 0x000c000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 0x921c, 0xffffffff, 0x0000000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 0x9220, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 0x9224, 0xffffffff, 0x00110010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 0x9228, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 0x922c, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 0x9230, 0xffffffff, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 0x9234, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 0x9238, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 0x923c, 0xffffffff, 0x000c000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 0x9240, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 0x9244, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 0x9248, 0xffffffff, 0x0010000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 0x924c, 0xffffffff, 0x00130012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 0x9250, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 0x9254, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 0x9258, 0xffffffff, 0x00100013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 0x925c, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 0x9260, 0xffffffff, 0x00150014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 0x9150, 0xffffffff, 0x96940200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 0x8708, 0xffffffff, 0x00900100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 0xc478, 0xffffffff, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 0xc404, 0xffffffff, 0x0020003f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 0x30, 0xffffffff, 0x0000001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 0x34, 0x000f0000, 0x000f0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 0x1024, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 0x102c, 0x00000101, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 0x20a8, 0xffffffff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 0x55e4, 0xff000fff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 0x55e8, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 0x2f50, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 0x30cc, 0xc0000fff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 0xc1e4, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 0xd0c0, 0xfffffff0, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 0xd8c0, 0xfffffff0, 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) static const u32 verde_mgcg_cgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 0xc400, 0xffffffff, 0xfffffffc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 0x92a4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 0x9774, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 0x8984, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 0x92a0, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 0xc380, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 0x8d88, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 0x8d8c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 0xad80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 0xac54, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 0xaf04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 0xae04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 0x9160, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 0x9164, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 0x9168, 0xffffffff, 0x00040007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 0x916c, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 0x9170, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 0x9174, 0xffffffff, 0x00020001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 0x9178, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 0x917c, 0xffffffff, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 0x9180, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 0x9184, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 0x9188, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 0x918c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 0x9190, 0xffffffff, 0x00000008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 0x9194, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 0x9198, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 0x919c, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 0x91a0, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 0x91a4, 0xffffffff, 0x00000009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 0x91a8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 0x91ac, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 0x91b0, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 0x91b4, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 0x91b8, 0xffffffff, 0x0008000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 0x91bc, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 0x91c0, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 0x9200, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 0x9204, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 0x9208, 0xffffffff, 0x000c000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 0x920c, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 0x9210, 0xffffffff, 0x00110010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 0x9214, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 0x9218, 0xffffffff, 0x000c000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 0x921c, 0xffffffff, 0x0000000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 0x9220, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 0x9224, 0xffffffff, 0x00110010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 0x9228, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 0x922c, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 0x9230, 0xffffffff, 0x00000010,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 0x9234, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 0x9238, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 0x923c, 0xffffffff, 0x000c000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 0x9240, 0xffffffff, 0x000e000d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 0x9244, 0xffffffff, 0x00000011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 0x9248, 0xffffffff, 0x0010000f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 0x924c, 0xffffffff, 0x00130012,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 0x9250, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 0x9254, 0xffffffff, 0x000f000e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 0x9258, 0xffffffff, 0x00100013,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 0x925c, 0xffffffff, 0x00120011,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 0x9260, 0xffffffff, 0x00150014,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 0x9150, 0xffffffff, 0x96940200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 0x8708, 0xffffffff, 0x00900100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 0xc478, 0xffffffff, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 0xc404, 0xffffffff, 0x0020003f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 0x30, 0xffffffff, 0x0000001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 0x34, 0x000f0000, 0x000f0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 0x1024, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 0x102c, 0x00000101, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 0x20a8, 0xffffffff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 0x264c, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 0x2648, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 0x55e4, 0xff000fff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 0x55e8, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 0x2f50, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 0x30cc, 0xc0000fff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 0xc1e4, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 0xd0c0, 0xfffffff0, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 0xd8c0, 0xfffffff0, 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) static const u32 oland_mgcg_cgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 0xc400, 0xffffffff, 0xfffffffc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 0x92a4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) 0x9774, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) 0x8984, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) 0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) 0x92a0, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) 0xc380, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) 0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 0x8d88, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) 0x8d8c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) 0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) 0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) 0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) 0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) 0xad80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) 0xac54, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) 0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) 0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) 0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) 0xaf04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) 0xae04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) 0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) 0x9160, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) 0x9164, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) 0x9168, 0xffffffff, 0x00040007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) 0x916c, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) 0x9170, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) 0x9174, 0xffffffff, 0x00020001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) 0x9178, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) 0x917c, 0xffffffff, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) 0x9180, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) 0x9184, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) 0x9188, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) 0x918c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) 0x9190, 0xffffffff, 0x00000008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) 0x9194, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) 0x9198, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) 0x919c, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) 0x91a0, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) 0x91a4, 0xffffffff, 0x00000009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) 0x91a8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) 0x91ac, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) 0x91b0, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) 0x91b4, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 0x91b8, 0xffffffff, 0x0008000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 0x91bc, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 0x91c0, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 0x91c4, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 0x91c8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 0x91cc, 0xffffffff, 0x0000000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 0x91d0, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 0x91d4, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 0x9150, 0xffffffff, 0x96940200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 0x8708, 0xffffffff, 0x00900100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 0xc478, 0xffffffff, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 0xc404, 0xffffffff, 0x0020003f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 0x30, 0xffffffff, 0x0000001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 0x34, 0x000f0000, 0x000f0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 0x1024, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 0x102c, 0x00000101, 0x00000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 0x20a8, 0xffffffff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 0x264c, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 0x2648, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 0x55e4, 0xff000fff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 0x55e8, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 0x2f50, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 0x30cc, 0xc0000fff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 0xc1e4, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 0xd0c0, 0xfffffff0, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 0xd8c0, 0xfffffff0, 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static const u32 hainan_mgcg_cgcg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 0xc400, 0xffffffff, 0xfffffffc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 0x9a60, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 0x92a4, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 0xc164, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 0x9774, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 0x8984, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 0x8a18, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 0x92a0, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 0xc380, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 0x8b28, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 0x9144, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 0x8d88, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 0x8d8c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 0x9030, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 0x9034, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 0x9038, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 0x903c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 0xad80, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 0xac54, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 0x897c, 0xffffffff, 0x06000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 0x9868, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 0x9510, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 0xaf04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 0xae04, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 0x949c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 0x802c, 0xffffffff, 0xe0000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 0x9160, 0xffffffff, 0x00010000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 0x9164, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 0x9168, 0xffffffff, 0x00040007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 0x916c, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 0x9170, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 0x9174, 0xffffffff, 0x00020001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 0x9178, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 0x917c, 0xffffffff, 0x00000007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 0x9180, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 0x9184, 0xffffffff, 0x00090008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 0x9188, 0xffffffff, 0x00030002,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 0x918c, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 0x9190, 0xffffffff, 0x00000008,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 0x9194, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 0x9198, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 0x919c, 0xffffffff, 0x00040003,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 0x91a0, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 0x91a4, 0xffffffff, 0x00000009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 0x91a8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 0x91ac, 0xffffffff, 0x000b000a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 0x91b0, 0xffffffff, 0x00050004,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 0x91b4, 0xffffffff, 0x00070006,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 0x91b8, 0xffffffff, 0x0008000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 0x91bc, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 0x91c0, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 0x91c4, 0xffffffff, 0x00060005,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 0x91c8, 0xffffffff, 0x00080007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 0x91cc, 0xffffffff, 0x0000000b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 0x91d0, 0xffffffff, 0x000a0009,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 0x91d4, 0xffffffff, 0x000d000c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 0x9150, 0xffffffff, 0x96940200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 0x8708, 0xffffffff, 0x00900100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 0xc478, 0xffffffff, 0x00000080,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 0xc404, 0xffffffff, 0x0020003f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 0x30, 0xffffffff, 0x0000001c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 0x34, 0x000f0000, 0x000f0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 0x160c, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 0x1024, 0xffffffff, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 0x20a8, 0xffffffff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 0x264c, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 0x2648, 0x000c0000, 0x000c0000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 0x2f50, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 0x30cc, 0xc0000fff, 0x00000104,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 0xc1e4, 0x00000001, 0x00000001,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 0xd0c0, 0xfffffff0, 0x00000100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 0xd8c0, 0xfffffff0, 0x00000100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static u32 verde_pg_init[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 0x353c, 0xffffffff, 0x40000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 0x3538, 0xffffffff, 0x200010ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 0x353c, 0xffffffff, 0x7007,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 0x3538, 0xffffffff, 0x300010ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 0x353c, 0xffffffff, 0x400000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 0x3538, 0xffffffff, 0x100010ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 0x353c, 0xffffffff, 0x120200,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 0x3538, 0xffffffff, 0x500010ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 0x353c, 0xffffffff, 0x1e1e16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 0x3538, 0xffffffff, 0x600010ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 0x353c, 0xffffffff, 0x171f1e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 0x3538, 0xffffffff, 0x700010ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 0x353c, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 0x3538, 0xffffffff, 0x9ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 0x3500, 0xffffffff, 0x0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 0x3504, 0xffffffff, 0x10000800,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 0x3504, 0xffffffff, 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 0x3504, 0xffffffff, 0xf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 0x3500, 0xffffffff, 0x4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 0x3504, 0xffffffff, 0x1000051e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 0x3504, 0xffffffff, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 0x3504, 0xffffffff, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 0x3500, 0xffffffff, 0x8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 0x3504, 0xffffffff, 0x80500,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 0x3500, 0xffffffff, 0x12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 0x3504, 0xffffffff, 0x9050c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 0x3500, 0xffffffff, 0x1d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 0x3504, 0xffffffff, 0xb052c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 0x3500, 0xffffffff, 0x2a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 0x3504, 0xffffffff, 0x1053e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 0x3500, 0xffffffff, 0x2d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 0x3504, 0xffffffff, 0x10546,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 0x3500, 0xffffffff, 0x30,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 0x3504, 0xffffffff, 0xa054e,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 0x3500, 0xffffffff, 0x3c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 0x3504, 0xffffffff, 0x1055f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 0x3500, 0xffffffff, 0x3f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 0x3504, 0xffffffff, 0x10567,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 0x3500, 0xffffffff, 0x42,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 0x3504, 0xffffffff, 0x1056f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 0x3500, 0xffffffff, 0x45,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 0x3504, 0xffffffff, 0x10572,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 0x3500, 0xffffffff, 0x48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 0x3504, 0xffffffff, 0x20575,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 0x3500, 0xffffffff, 0x4c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 0x3504, 0xffffffff, 0x190801,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 0x3500, 0xffffffff, 0x67,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 0x3504, 0xffffffff, 0x1082a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 0x3500, 0xffffffff, 0x6a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 0x3504, 0xffffffff, 0x1b082d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 0x3500, 0xffffffff, 0x87,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 0x3504, 0xffffffff, 0x310851,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 0x3500, 0xffffffff, 0xba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 0x3504, 0xffffffff, 0x891,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 0x3500, 0xffffffff, 0xbc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 0x3504, 0xffffffff, 0x893,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 0x3500, 0xffffffff, 0xbe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 0x3504, 0xffffffff, 0x20895,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 0x3500, 0xffffffff, 0xc2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 0x3504, 0xffffffff, 0x20899,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 0x3500, 0xffffffff, 0xc6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 0x3504, 0xffffffff, 0x2089d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 0x3500, 0xffffffff, 0xca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 0x3504, 0xffffffff, 0x8a1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 0x3500, 0xffffffff, 0xcc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 0x3504, 0xffffffff, 0x8a3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 0x3500, 0xffffffff, 0xce,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 0x3504, 0xffffffff, 0x308a5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 0x3500, 0xffffffff, 0xd3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 0x3504, 0xffffffff, 0x6d08cd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 0x3500, 0xffffffff, 0x142,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 0x3504, 0xffffffff, 0x2000095a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 0x3504, 0xffffffff, 0x1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 0x3500, 0xffffffff, 0x144,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 0x3504, 0xffffffff, 0x301f095b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 0x3500, 0xffffffff, 0x165,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 0x3504, 0xffffffff, 0xc094d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 0x3500, 0xffffffff, 0x173,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 0x3504, 0xffffffff, 0xf096d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 0x3500, 0xffffffff, 0x184,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 0x3504, 0xffffffff, 0x15097f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 0x3500, 0xffffffff, 0x19b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 0x3504, 0xffffffff, 0xc0998,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 0x3500, 0xffffffff, 0x1a9,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 0x3504, 0xffffffff, 0x409a7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 0x3500, 0xffffffff, 0x1af,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 0x3504, 0xffffffff, 0xcdc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 0x3500, 0xffffffff, 0x1b1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 0x3504, 0xffffffff, 0x800,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 0x3508, 0xffffffff, 0x6c9b2000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 0x3510, 0xfc00, 0x2000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 0x3544, 0xffffffff, 0xfc0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 0x28d4, 0x00000100, 0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void si_init_golden_registers(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) case CHIP_TAHITI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) tahiti_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) (const u32)ARRAY_SIZE(tahiti_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) tahiti_golden_rlc_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) tahiti_mgcg_cgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) tahiti_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) (const u32)ARRAY_SIZE(tahiti_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case CHIP_PITCAIRN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) pitcairn_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) (const u32)ARRAY_SIZE(pitcairn_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pitcairn_golden_rlc_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) pitcairn_mgcg_cgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) case CHIP_VERDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) verde_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) (const u32)ARRAY_SIZE(verde_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) verde_golden_rlc_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) verde_mgcg_cgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) verde_pg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) (const u32)ARRAY_SIZE(verde_pg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) case CHIP_OLAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) oland_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) (const u32)ARRAY_SIZE(oland_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) oland_golden_rlc_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) oland_mgcg_cgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) case CHIP_HAINAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) hainan_golden_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) (const u32)ARRAY_SIZE(hainan_golden_registers));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) hainan_golden_registers2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) (const u32)ARRAY_SIZE(hainan_golden_registers2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) radeon_program_register_sequence(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) hainan_mgcg_cgcg_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * si_get_allowed_info_register - fetch the register for the info ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * @reg: register offset in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * @val: register value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * Returns 0 for success or -EINVAL for an invalid register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) int si_get_allowed_info_register(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) u32 reg, u32 *val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) case GRBM_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) case GRBM_STATUS2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) case GRBM_STATUS_SE0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) case GRBM_STATUS_SE1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) case SRBM_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) case SRBM_STATUS2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) case UVD_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) *val = RREG32(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) #define PCIE_BUS_CLK 10000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) #define TCLK (PCIE_BUS_CLK / 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * si_get_xclk - get the xclk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * Returns the reference clock used by the gfx engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * (SI).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) u32 si_get_xclk(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) u32 reference_clock = rdev->clock.spll.reference_freq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) tmp = RREG32(CG_CLKPIN_CNTL_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (tmp & MUX_TCLK_TO_XCLK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return TCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) tmp = RREG32(CG_CLKPIN_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (tmp & XTALIN_DIVIDE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return reference_clock / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) return reference_clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* get temperature in millidegrees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) int si_get_temp(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) u32 temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) int actual_temp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) CTF_TEMP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) if (temp & 0x200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) actual_temp = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) actual_temp = temp & 0x1ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) actual_temp = (actual_temp * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) return actual_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) #define TAHITI_IO_MC_REGS_SIZE 36
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {0x0000006f, 0x03044000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) {0x00000070, 0x0480c018},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) {0x00000071, 0x00000040},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) {0x00000072, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {0x00000074, 0x000000ff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {0x00000075, 0x00143400},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {0x00000076, 0x08ec0800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {0x00000077, 0x040000cc},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {0x00000079, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {0x0000007a, 0x21000409},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {0x0000007c, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {0x0000007d, 0xe8000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {0x0000007e, 0x044408a8},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {0x0000007f, 0x00000003},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {0x00000080, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) {0x00000081, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {0x00000082, 0x02000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {0x00000084, 0xe3f3e4f4},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) {0x00000085, 0x00052024},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {0x00000087, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {0x00000088, 0x66036603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {0x00000089, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {0x0000008b, 0x1c0a0000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) {0x0000008c, 0xff010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {0x0000008e, 0xffffefff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {0x0000008f, 0xfff3efff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) {0x00000090, 0xfff3efbf},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {0x0000009f, 0x00a77400}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) {0x0000006f, 0x03044000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {0x00000070, 0x0480c018},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) {0x00000071, 0x00000040},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {0x00000072, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {0x00000074, 0x000000ff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {0x00000075, 0x00143400},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {0x00000076, 0x08ec0800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {0x00000077, 0x040000cc},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {0x00000079, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) {0x0000007a, 0x21000409},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) {0x0000007c, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {0x0000007d, 0xe8000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {0x0000007e, 0x044408a8},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {0x0000007f, 0x00000003},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) {0x00000080, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) {0x00000081, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {0x00000082, 0x02000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {0x00000084, 0xe3f3e4f4},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {0x00000085, 0x00052024},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) {0x00000087, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) {0x00000088, 0x66036603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {0x00000089, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) {0x0000008b, 0x1c0a0000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {0x0000008c, 0xff010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {0x0000008e, 0xffffefff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) {0x0000008f, 0xfff3efff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) {0x00000090, 0xfff3efbf},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {0x0000009f, 0x00a47400}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) {0x0000006f, 0x03044000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {0x00000070, 0x0480c018},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) {0x00000071, 0x00000040},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) {0x00000072, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) {0x00000074, 0x000000ff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {0x00000075, 0x00143400},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {0x00000076, 0x08ec0800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {0x00000077, 0x040000cc},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {0x00000079, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {0x0000007a, 0x21000409},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {0x0000007c, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {0x0000007d, 0xe8000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {0x0000007e, 0x044408a8},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) {0x0000007f, 0x00000003},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {0x00000080, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) {0x00000081, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {0x00000082, 0x02000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {0x00000084, 0xe3f3e4f4},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {0x00000085, 0x00052024},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {0x00000087, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {0x00000088, 0x66036603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {0x00000089, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) {0x0000008b, 0x1c0a0000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {0x0000008c, 0xff010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {0x0000008e, 0xffffefff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) {0x0000008f, 0xfff3efff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {0x00000090, 0xfff3efbf},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) {0x0000009f, 0x00a37400}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {0x0000006f, 0x03044000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {0x00000070, 0x0480c018},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) {0x00000071, 0x00000040},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) {0x00000072, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {0x00000074, 0x000000ff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) {0x00000075, 0x00143400},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {0x00000076, 0x08ec0800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {0x00000077, 0x040000cc},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {0x00000079, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {0x0000007a, 0x21000409},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {0x0000007c, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {0x0000007d, 0xe8000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {0x0000007e, 0x044408a8},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) {0x0000007f, 0x00000003},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) {0x00000080, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {0x00000081, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {0x00000082, 0x02000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {0x00000084, 0xe3f3e4f4},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {0x00000085, 0x00052024},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) {0x00000087, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {0x00000088, 0x66036603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {0x00000089, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {0x0000008b, 0x1c0a0000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) {0x0000008c, 0xff010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {0x0000008e, 0xffffefff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {0x0000008f, 0xfff3efff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {0x00000090, 0xfff3efbf},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {0x0000009f, 0x00a17730}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {0x0000006f, 0x03044000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) {0x00000070, 0x0480c018},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {0x00000071, 0x00000040},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {0x00000072, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {0x00000074, 0x000000ff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {0x00000075, 0x00143400},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {0x00000076, 0x08ec0800},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {0x00000077, 0x040000cc},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) {0x00000079, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {0x0000007a, 0x21000409},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) {0x0000007c, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) {0x0000007d, 0xe8000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {0x0000007e, 0x044408a8},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {0x0000007f, 0x00000003},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {0x00000080, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {0x00000081, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) {0x00000082, 0x02000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {0x00000083, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {0x00000084, 0xe3f3e4f4},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {0x00000085, 0x00052024},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) {0x00000087, 0x00000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {0x00000088, 0x66036603},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) {0x00000089, 0x01000000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {0x0000008b, 0x1c0a0000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {0x0000008c, 0xff010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) {0x0000008e, 0xffffefff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {0x0000008f, 0xfff3efff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) {0x00000090, 0xfff3efbf},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {0x00000094, 0x00101101},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {0x00000095, 0x00000fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) {0x00000096, 0x00116fff},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {0x00000097, 0x60010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) {0x00000098, 0x10010000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {0x00000099, 0x00006000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) {0x0000009a, 0x00001000},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) {0x0000009f, 0x00a07730}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) /* ucode loading */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) int si_mc_load_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) const __be32 *fw_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) const __le32 *new_fw_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) u32 running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) u32 *io_mc_regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) const __le32 *new_io_mc_regs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) int i, regs_size, ucode_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) if (!rdev->mc_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (rdev->new_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) const struct mc_firmware_header_v1_0 *hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) radeon_ucode_print_mc_hdr(&hdr->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) new_io_mc_regs = (const __le32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) new_fw_data = (const __le32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) ucode_size = rdev->mc_fw->size / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) case CHIP_TAHITI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) io_mc_regs = (u32 *)&tahiti_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) regs_size = TAHITI_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) case CHIP_PITCAIRN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) regs_size = TAHITI_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) case CHIP_VERDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) io_mc_regs = (u32 *)&verde_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) regs_size = TAHITI_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) case CHIP_OLAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) io_mc_regs = (u32 *)&oland_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) regs_size = TAHITI_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) case CHIP_HAINAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) io_mc_regs = (u32 *)&hainan_io_mc_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) regs_size = TAHITI_IO_MC_REGS_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) fw_data = (const __be32 *)rdev->mc_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (running == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* reset the engine and set to writable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) /* load mc io regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) for (i = 0; i < regs_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (rdev->new_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) /* load the MC ucode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) for (i = 0; i < ucode_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (rdev->new_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) /* put the engine back into the active state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) /* wait for training to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static int si_init_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) const char *chip_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) const char *new_chip_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) size_t smc_req_size, mc2_req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) char fw_name[30];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) int new_fw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) bool new_smc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) bool si58_fw = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) bool banks2_fw = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) DRM_DEBUG("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) case CHIP_TAHITI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) chip_name = "TAHITI";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) new_chip_name = "tahiti";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) pfp_req_size = SI_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) me_req_size = SI_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) ce_req_size = SI_CE_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) rlc_req_size = SI_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) mc_req_size = SI_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) case CHIP_PITCAIRN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) chip_name = "PITCAIRN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if ((rdev->pdev->revision == 0x81) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) ((rdev->pdev->device == 0x6810) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) (rdev->pdev->device == 0x6811)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) new_smc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) new_chip_name = "pitcairn";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) pfp_req_size = SI_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) me_req_size = SI_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) ce_req_size = SI_CE_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) rlc_req_size = SI_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) mc_req_size = SI_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) case CHIP_VERDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) chip_name = "VERDE";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (((rdev->pdev->device == 0x6820) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) ((rdev->pdev->revision == 0x81) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) (rdev->pdev->revision == 0x83))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) ((rdev->pdev->device == 0x6821) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) ((rdev->pdev->revision == 0x83) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) (rdev->pdev->revision == 0x87))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) ((rdev->pdev->revision == 0x87) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) ((rdev->pdev->device == 0x6823) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) (rdev->pdev->device == 0x682b))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) new_smc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) new_chip_name = "verde";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) pfp_req_size = SI_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) me_req_size = SI_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) ce_req_size = SI_CE_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) rlc_req_size = SI_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) mc_req_size = SI_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) case CHIP_OLAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) chip_name = "OLAND";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (((rdev->pdev->revision == 0x81) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) ((rdev->pdev->device == 0x6600) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) (rdev->pdev->device == 0x6604) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) (rdev->pdev->device == 0x6605) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) (rdev->pdev->device == 0x6610))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) ((rdev->pdev->revision == 0x83) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) (rdev->pdev->device == 0x6610)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) new_smc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) new_chip_name = "oland";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) pfp_req_size = SI_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) me_req_size = SI_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) ce_req_size = SI_CE_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) rlc_req_size = SI_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) case CHIP_HAINAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) chip_name = "HAINAN";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) if (((rdev->pdev->revision == 0x81) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) (rdev->pdev->device == 0x6660)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) ((rdev->pdev->revision == 0x83) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) ((rdev->pdev->device == 0x6660) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) (rdev->pdev->device == 0x6663) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) (rdev->pdev->device == 0x6665) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) (rdev->pdev->device == 0x6667))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) new_smc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) else if ((rdev->pdev->revision == 0xc3) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) (rdev->pdev->device == 0x6665))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) banks2_fw = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) new_chip_name = "hainan";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) pfp_req_size = SI_PFP_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) me_req_size = SI_PM4_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ce_req_size = SI_CE_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) rlc_req_size = SI_RLC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) default: BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /* this memory configuration requires special firmware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) si58_fw = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) DRM_INFO("Loading %s Microcode\n", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) if (rdev->pfp_fw->size != pfp_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) rdev->pfp_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) err = radeon_ucode_validate(rdev->pfp_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) pr_err("si_cp: validation failed for firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) new_fw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) if (rdev->me_fw->size != me_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) rdev->me_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) err = radeon_ucode_validate(rdev->me_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) pr_err("si_cp: validation failed for firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) new_fw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (rdev->ce_fw->size != ce_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) pr_err("si_cp: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) rdev->ce_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) err = radeon_ucode_validate(rdev->ce_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) pr_err("si_cp: validation failed for firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) new_fw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) if (rdev->rlc_fw->size != rlc_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) pr_err("si_rlc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) rdev->rlc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) err = radeon_ucode_validate(rdev->rlc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) pr_err("si_cp: validation failed for firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) new_fw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (si58_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if ((rdev->mc_fw->size != mc_req_size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) (rdev->mc_fw->size != mc2_req_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) pr_err("si_mc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) rdev->mc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) err = radeon_ucode_validate(rdev->mc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) pr_err("si_cp: validation failed for firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) new_fw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (banks2_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) else if (new_smc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) pr_err("smc: error loading firmware \"%s\"\n", fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) release_firmware(rdev->smc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) rdev->smc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) } else if (rdev->smc_fw->size != smc_req_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) pr_err("si_smc: Bogus length %zu in firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) rdev->smc_fw->size, fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) err = radeon_ucode_validate(rdev->smc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) pr_err("si_cp: validation failed for firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) new_fw++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (new_fw == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) rdev->new_fw = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) } else if (new_fw < 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) pr_err("si_fw: mixing new and old firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) rdev->new_fw = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (err != -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) pr_err("si_cp: Failed to load firmware \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) fw_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) release_firmware(rdev->pfp_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) rdev->pfp_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) release_firmware(rdev->me_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) rdev->me_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) release_firmware(rdev->ce_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) rdev->ce_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) release_firmware(rdev->rlc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) rdev->rlc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) release_firmware(rdev->mc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) rdev->mc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) release_firmware(rdev->smc_fw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) rdev->smc_fw = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) /* watermark setup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) struct radeon_crtc *radeon_crtc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) struct drm_display_mode *mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct drm_display_mode *other_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) u32 tmp, buffer_alloc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * Line Buffer Setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * There are 3 line buffers, each one shared by 2 display controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * the display controllers. The paritioning is done via one of four
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * preset allocations specified in bits 21:20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * 0 - half lb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * 2 - whole lb, other crtc must be disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) /* this can get tricky if we have two large displays on a paired group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) * of crtcs. Ideally for multiple large displays we'd assign them to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) * non-linked crtcs for maximum line buffer allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) if (radeon_crtc->base.enabled && mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if (other_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) tmp = 0; /* 1/2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) buffer_alloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) tmp = 2; /* whole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) buffer_alloc = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) buffer_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) DC_LB_MEMORY_CONFIG(tmp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) DMIF_BUFFERS_ALLOCATED(buffer_alloc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) DMIF_BUFFERS_ALLOCATED_COMPLETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) if (radeon_crtc->base.enabled && mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) switch (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) return 4096 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) return 8192 * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /* controller not enabled, so no lb used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) u32 tmp = RREG32(MC_SHARED_CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) return 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) return 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) return 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) return 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) struct dce6_wm_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) u32 dram_channels; /* number of dram channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) u32 yclk; /* bandwidth per dram data pin in kHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) u32 sclk; /* engine clock in kHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) u32 disp_clk; /* display clock in kHz */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) u32 src_width; /* viewport width */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) u32 active_time; /* active display time in ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) u32 blank_time; /* blank time in ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) bool interlaced; /* mode is interlaced */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) fixed20_12 vsc; /* vertical scale ratio */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) u32 num_heads; /* number of active crtcs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) u32 bytes_per_pixel; /* bytes per pixel display + overlay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) u32 lb_size; /* line buffer allocated to pipe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) u32 vtaps; /* vertical scaler taps */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /* Calculate raw DRAM Bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) fixed20_12 dram_efficiency; /* 0.7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) fixed20_12 yclk, dram_channels, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) yclk.full = dfixed_const(wm->yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) yclk.full = dfixed_div(yclk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) dram_channels.full = dfixed_const(wm->dram_channels * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) dram_efficiency.full = dfixed_const(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) dram_efficiency.full = dfixed_div(dram_efficiency, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) bandwidth.full = dfixed_mul(dram_channels, yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) /* Calculate DRAM Bandwidth and the part allocated to display. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) fixed20_12 yclk, dram_channels, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) yclk.full = dfixed_const(wm->yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) yclk.full = dfixed_div(yclk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) dram_channels.full = dfixed_const(wm->dram_channels * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) bandwidth.full = dfixed_mul(dram_channels, yclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) /* Calculate the display Data return Bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) fixed20_12 return_efficiency; /* 0.8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) fixed20_12 sclk, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) sclk.full = dfixed_const(wm->sclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) sclk.full = dfixed_div(sclk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) return_efficiency.full = dfixed_const(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return_efficiency.full = dfixed_div(return_efficiency, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) a.full = dfixed_const(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) bandwidth.full = dfixed_mul(a, sclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) return 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) /* Calculate the DMIF Request Bandwidth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) fixed20_12 disp_clk_request_efficiency; /* 0.8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) fixed20_12 disp_clk, sclk, bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) fixed20_12 a, b1, b2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) u32 min_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) disp_clk.full = dfixed_const(wm->disp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) disp_clk.full = dfixed_div(disp_clk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) b1.full = dfixed_mul(a, disp_clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) sclk.full = dfixed_const(wm->sclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) sclk.full = dfixed_div(sclk, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) b2.full = dfixed_mul(a, sclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) a.full = dfixed_const(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) disp_clk_request_efficiency.full = dfixed_const(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) a.full = dfixed_const(min_bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) u32 dram_bandwidth = dce6_dram_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) /* Calculate the display mode Average Bandwidth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * DisplayMode should contain the source and destination dimensions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * timing, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) fixed20_12 bpp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) fixed20_12 line_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) fixed20_12 src_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) fixed20_12 bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) line_time.full = dfixed_const(wm->active_time + wm->blank_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) line_time.full = dfixed_div(line_time, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) bpp.full = dfixed_const(wm->bytes_per_pixel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) src_width.full = dfixed_const(wm->src_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) bandwidth.full = dfixed_mul(src_width, bpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) bandwidth.full = dfixed_div(bandwidth, line_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return dfixed_trunc(bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) /* First calcualte the latency in ns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) u32 mc_latency = 2000; /* 2000 ns. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) u32 available_bandwidth = dce6_available_bandwidth(wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) (wm->num_heads * cursor_line_pair_return_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) u32 tmp, dmif_size = 12288;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) fixed20_12 a, b, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (wm->num_heads == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) a.full = dfixed_const(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) b.full = dfixed_const(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if ((wm->vsc.full > a.full) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) (wm->vtaps >= 5) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) ((wm->vsc.full >= a.full) && wm->interlaced))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) max_src_lines_per_dst_line = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) max_src_lines_per_dst_line = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) a.full = dfixed_const(available_bandwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) b.full = dfixed_const(wm->num_heads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) a.full = dfixed_div(a, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) tmp = min(dfixed_trunc(a), tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) b.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) c.full = dfixed_const(lb_fill_bw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) b.full = dfixed_div(c, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) a.full = dfixed_div(a, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) line_fill_time = dfixed_trunc(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) if (line_fill_time < wm->active_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return latency;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return latency + (line_fill_time - wm->active_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (dce6_average_bandwidth(wm) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) if (dce6_average_bandwidth(wm) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) (dce6_available_bandwidth(wm) / wm->num_heads))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) u32 lb_partitions = wm->lb_size / wm->src_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) u32 line_time = wm->active_time + wm->blank_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) u32 latency_tolerant_lines;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) u32 latency_hiding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) fixed20_12 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) a.full = dfixed_const(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (wm->vsc.full > a.full)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) latency_tolerant_lines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) if (lb_partitions <= (wm->vtaps + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) latency_tolerant_lines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) latency_tolerant_lines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) if (dce6_latency_watermark(wm) <= latency_hiding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static void dce6_program_watermarks(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) struct radeon_crtc *radeon_crtc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) u32 lb_size, u32 num_heads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) struct drm_display_mode *mode = &radeon_crtc->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) struct dce6_wm_params wm_low, wm_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) u32 dram_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) u32 active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) u32 line_time = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) u32 latency_watermark_a = 0, latency_watermark_b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) u32 priority_a_mark = 0, priority_b_mark = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) u32 priority_a_cnt = PRIORITY_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) u32 priority_b_cnt = PRIORITY_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) u32 tmp, arb_control3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) fixed20_12 a, b, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (radeon_crtc->base.enabled && num_heads && mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) (u32)mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) (u32)mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) line_time = min(line_time, (u32)65535);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) priority_a_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) priority_b_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (rdev->family == CHIP_ARUBA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) dram_channels = evergreen_get_number_of_dram_channels(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) dram_channels = si_get_number_of_dram_channels(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) /* watermark for high clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) wm_high.yclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) radeon_dpm_get_mclk(rdev, false) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) wm_high.sclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) radeon_dpm_get_sclk(rdev, false) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) wm_high.yclk = rdev->pm.current_mclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) wm_high.sclk = rdev->pm.current_sclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) wm_high.disp_clk = mode->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) wm_high.src_width = mode->crtc_hdisplay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) wm_high.active_time = active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) wm_high.blank_time = line_time - wm_high.active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) wm_high.interlaced = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (mode->flags & DRM_MODE_FLAG_INTERLACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) wm_high.interlaced = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) wm_high.vsc = radeon_crtc->vsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) wm_high.vtaps = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (radeon_crtc->rmx_type != RMX_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) wm_high.vtaps = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) wm_high.lb_size = lb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) wm_high.dram_channels = dram_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) wm_high.num_heads = num_heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) /* watermark for low clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) wm_low.yclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) radeon_dpm_get_mclk(rdev, true) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) wm_low.sclk =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) radeon_dpm_get_sclk(rdev, true) * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) wm_low.yclk = rdev->pm.current_mclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) wm_low.sclk = rdev->pm.current_sclk * 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) wm_low.disp_clk = mode->clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) wm_low.src_width = mode->crtc_hdisplay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) wm_low.active_time = active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) wm_low.blank_time = line_time - wm_low.active_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) wm_low.interlaced = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (mode->flags & DRM_MODE_FLAG_INTERLACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) wm_low.interlaced = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) wm_low.vsc = radeon_crtc->vsc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) wm_low.vtaps = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) if (radeon_crtc->rmx_type != RMX_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) wm_low.vtaps = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) wm_low.lb_size = lb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) wm_low.dram_channels = dram_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) wm_low.num_heads = num_heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /* set for high clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) /* set for low clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) /* possibly force display priority to high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) /* should really do this at mode validation time... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) !dce6_check_latency_hiding(&wm_high) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) (rdev->disp_priority == 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) DRM_DEBUG_KMS("force priority to high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) priority_a_cnt |= PRIORITY_ALWAYS_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) priority_b_cnt |= PRIORITY_ALWAYS_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) !dce6_check_latency_hiding(&wm_low) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) (rdev->disp_priority == 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) DRM_DEBUG_KMS("force priority to high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) priority_a_cnt |= PRIORITY_ALWAYS_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) priority_b_cnt |= PRIORITY_ALWAYS_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) b.full = dfixed_const(mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) b.full = dfixed_div(b, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) c.full = dfixed_const(latency_watermark_a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) c.full = dfixed_mul(c, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) c.full = dfixed_mul(c, radeon_crtc->hsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) a.full = dfixed_const(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) priority_a_mark = dfixed_trunc(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) a.full = dfixed_const(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) b.full = dfixed_const(mode->clock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) b.full = dfixed_div(b, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) c.full = dfixed_const(latency_watermark_b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) c.full = dfixed_mul(c, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) c.full = dfixed_mul(c, radeon_crtc->hsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) a.full = dfixed_const(16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) c.full = dfixed_div(c, a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) priority_b_mark = dfixed_trunc(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) /* Save number of lines the linebuffer leads before the scanout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /* select wm A */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) tmp = arb_control3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) tmp &= ~LATENCY_WATERMARK_MASK(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) tmp |= LATENCY_WATERMARK_MASK(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) (LATENCY_LOW_WATERMARK(latency_watermark_a) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) LATENCY_HIGH_WATERMARK(line_time)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) /* select wm B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) tmp &= ~LATENCY_WATERMARK_MASK(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) tmp |= LATENCY_WATERMARK_MASK(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) (LATENCY_LOW_WATERMARK(latency_watermark_b) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) LATENCY_HIGH_WATERMARK(line_time)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) /* restore original selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) /* write the priority marks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) /* save values for DPM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) radeon_crtc->line_time = line_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) radeon_crtc->wm_high = latency_watermark_a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) radeon_crtc->wm_low = latency_watermark_b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) void dce6_bandwidth_update(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) struct drm_display_mode *mode0 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) struct drm_display_mode *mode1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) u32 num_heads = 0, lb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) if (!rdev->mode_info.mode_config_initialized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) radeon_update_display_priority(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) if (rdev->mode_info.crtcs[i]->base.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) num_heads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) for (i = 0; i < rdev->num_crtc; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) mode0 = &rdev->mode_info.crtcs[i]->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) * Core functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) static void si_tiling_mode_table_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) u32 *tile = rdev->config.si.tile_mode_array;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) const u32 num_tile_mode_states =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) ARRAY_SIZE(rdev->config.si.tile_mode_array);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) u32 reg_offset, split_equal_to_row_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) switch (rdev->config.si.mem_row_size_in_kb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) tile[reg_offset] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) switch(rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) case CHIP_TAHITI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) case CHIP_PITCAIRN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) /* non-AA compressed depth or any compressed stencil */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) /* 2xAA/4xAA compressed depth only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) /* 8xAA compressed depth only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) /* 1D and 1D Array Surfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) /* Displayable maps. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) /* Display 8bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) /* Display 16bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) /* Display 32bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) /* Thin. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) /* Thin 8 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /* Thin 16 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) /* Thin 32 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) /* Thin 64 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) /* 8 bpp PRT. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) /* 16 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* 32 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) /* 64 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) /* 128 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) NUM_BANKS(ADDR_SURF_8_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) case CHIP_VERDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) case CHIP_OLAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) case CHIP_HAINAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) /* non-AA compressed depth or any compressed stencil */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) /* 2xAA/4xAA compressed depth only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) /* 8xAA compressed depth only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) /* 1D and 1D Array Surfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) /* Displayable maps. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) /* Display 8bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) /* Display 16bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) /* Display 32bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) /* Thin. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) /* Thin 8 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) /* Thin 16 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) /* Thin 32 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) /* Thin 64 bpp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) PIPE_CONFIG(ADDR_SURF_P4_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) TILE_SPLIT(split_equal_to_row_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) /* 8 bpp PRT. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) /* 16 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) /* 32 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) /* 64 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) NUM_BANKS(ADDR_SURF_16_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) /* 128 bpp PRT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) NUM_BANKS(ADDR_SURF_8_BANK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) static void si_select_se_sh(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) u32 se_num, u32 sh_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) u32 data = INSTANCE_BROADCAST_WRITES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) else if (se_num == 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) else if (sh_num == 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) WREG32(GRBM_GFX_INDEX, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) static u32 si_create_bitmask(u32 bit_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) u32 i, mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) for (i = 0; i < bit_width; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) mask <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) mask |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) u32 data, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) if (data & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) data &= INACTIVE_CUS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) data >>= INACTIVE_CUS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) mask = si_create_bitmask(cu_per_sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) return ~data & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) static void si_setup_spi(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) u32 se_num, u32 sh_per_se,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) u32 cu_per_sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) int i, j, k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) u32 data, mask, active_cu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) for (i = 0; i < se_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) for (j = 0; j < sh_per_se; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) si_select_se_sh(rdev, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) data = RREG32(SPI_STATIC_THREAD_MGMT_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) active_cu = si_get_cu_enabled(rdev, cu_per_sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) mask = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) for (k = 0; k < 16; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) mask <<= k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) if (active_cu & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) data &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) WREG32(SPI_STATIC_THREAD_MGMT_3, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) static u32 si_get_rb_disabled(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) u32 max_rb_num_per_se,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) u32 sh_per_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) u32 data, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) data = RREG32(CC_RB_BACKEND_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) if (data & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) data &= BACKEND_DISABLE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) data >>= BACKEND_DISABLE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) return data & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) static void si_setup_rb(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) u32 se_num, u32 sh_per_se,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) u32 max_rb_num_per_se)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) u32 data, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) u32 disabled_rbs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) u32 enabled_rbs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) for (i = 0; i < se_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) for (j = 0; j < sh_per_se; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) si_select_se_sh(rdev, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) mask = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) for (i = 0; i < max_rb_num_per_se * se_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) if (!(disabled_rbs & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) enabled_rbs |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) mask <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) rdev->config.si.backend_enable_mask = enabled_rbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) for (i = 0; i < se_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) si_select_se_sh(rdev, i, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) for (j = 0; j < sh_per_se; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) switch (enabled_rbs & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) enabled_rbs >>= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) WREG32(PA_SC_RASTER_CONFIG, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) static void si_gpu_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) u32 gb_addr_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) u32 mc_shared_chmap, mc_arb_ramcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) u32 sx_debug_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) u32 hdp_host_path_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) case CHIP_TAHITI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) rdev->config.si.max_shader_engines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) rdev->config.si.max_tile_pipes = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) rdev->config.si.max_cu_per_sh = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) rdev->config.si.max_sh_per_se = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) rdev->config.si.max_backends_per_se = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) rdev->config.si.max_texture_channel_caches = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) rdev->config.si.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) rdev->config.si.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) rdev->config.si.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) rdev->config.si.sc_prim_fifo_size_backend = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) case CHIP_PITCAIRN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) rdev->config.si.max_shader_engines = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) rdev->config.si.max_tile_pipes = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) rdev->config.si.max_cu_per_sh = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) rdev->config.si.max_sh_per_se = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) rdev->config.si.max_backends_per_se = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) rdev->config.si.max_texture_channel_caches = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) rdev->config.si.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) rdev->config.si.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) rdev->config.si.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) rdev->config.si.sc_prim_fifo_size_backend = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) case CHIP_VERDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) rdev->config.si.max_shader_engines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) rdev->config.si.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) rdev->config.si.max_cu_per_sh = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) rdev->config.si.max_sh_per_se = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) rdev->config.si.max_backends_per_se = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) rdev->config.si.max_texture_channel_caches = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) rdev->config.si.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) rdev->config.si.max_gs_threads = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) rdev->config.si.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) rdev->config.si.sc_prim_fifo_size_backend = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) case CHIP_OLAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) rdev->config.si.max_shader_engines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) rdev->config.si.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) rdev->config.si.max_cu_per_sh = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) rdev->config.si.max_sh_per_se = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) rdev->config.si.max_backends_per_se = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) rdev->config.si.max_texture_channel_caches = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) rdev->config.si.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) rdev->config.si.max_gs_threads = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) rdev->config.si.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) rdev->config.si.sc_prim_fifo_size_backend = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) case CHIP_HAINAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) rdev->config.si.max_shader_engines = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) rdev->config.si.max_tile_pipes = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) rdev->config.si.max_cu_per_sh = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) rdev->config.si.max_sh_per_se = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) rdev->config.si.max_backends_per_se = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) rdev->config.si.max_texture_channel_caches = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) rdev->config.si.max_gprs = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) rdev->config.si.max_gs_threads = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) rdev->config.si.max_hw_contexts = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) rdev->config.si.sc_prim_fifo_size_backend = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) /* Initialize HDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) for (i = 0, j = 0; i < 32; i++, j += 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) WREG32((0x2c14 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) WREG32((0x2c18 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) WREG32((0x2c1c + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) WREG32((0x2c20 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) WREG32((0x2c24 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) WREG32(SRBM_INT_CNTL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) WREG32(SRBM_INT_ACK, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) evergreen_fix_pci_max_read_req_size(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) rdev->config.si.mem_max_burst_length_bytes = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) if (rdev->config.si.mem_row_size_in_kb > 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) rdev->config.si.mem_row_size_in_kb = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) /* XXX use MC settings? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) rdev->config.si.shader_engine_tile_size = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) rdev->config.si.num_gpus = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) rdev->config.si.multi_gpu_tile_size = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) /* fix up row size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) gb_addr_config &= ~ROW_SIZE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) switch (rdev->config.si.mem_row_size_in_kb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) gb_addr_config |= ROW_SIZE(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) gb_addr_config |= ROW_SIZE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) gb_addr_config |= ROW_SIZE(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) /* setup tiling info dword. gb_addr_config is not adequate since it does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) * not have bank info, so create a custom tiling dword.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) * bits 3:0 num_pipes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) * bits 7:4 num_banks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) * bits 11:8 group_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) * bits 15:12 row_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) rdev->config.si.tile_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) switch (rdev->config.si.num_tile_pipes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) rdev->config.si.tile_config |= (0 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) rdev->config.si.tile_config |= (1 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) rdev->config.si.tile_config |= (2 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) /* XXX what about 12? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) rdev->config.si.tile_config |= (3 << 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) case 0: /* four banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) rdev->config.si.tile_config |= 0 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) case 1: /* eight banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) rdev->config.si.tile_config |= 1 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) case 2: /* sixteen banks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) rdev->config.si.tile_config |= 2 << 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) rdev->config.si.tile_config |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) rdev->config.si.tile_config |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) WREG32(GB_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) WREG32(DMIF_ADDR_CALC, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) WREG32(HDP_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) si_tiling_mode_table_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) si_setup_rb(rdev, rdev->config.si.max_shader_engines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) rdev->config.si.max_sh_per_se,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) rdev->config.si.max_backends_per_se);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) si_setup_spi(rdev, rdev->config.si.max_shader_engines,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) rdev->config.si.max_sh_per_se,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) rdev->config.si.max_cu_per_sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) rdev->config.si.active_cus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) rdev->config.si.active_cus +=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) hweight32(si_get_cu_active_bitmap(rdev, i, j));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) /* set HW defaults for 3D engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) ROQ_IB2_START(0x2b)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) sx_debug_1 = RREG32(SX_DEBUG_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) WREG32(SX_DEBUG_1, sx_debug_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) WREG32(VGT_NUM_INSTANCES, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) WREG32(CP_PERFMON_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) WREG32(SQ_CONFIG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) FORCE_EOV_MAX_REZ_CNT(255)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) AUTO_INVLD_EN(ES_AND_GS_AUTO));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) WREG32(VGT_GS_VERTEX_REUSE, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) WREG32(CB_PERFCOUNTER0_SELECT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) WREG32(CB_PERFCOUNTER0_SELECT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) WREG32(CB_PERFCOUNTER1_SELECT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) WREG32(CB_PERFCOUNTER1_SELECT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) WREG32(CB_PERFCOUNTER2_SELECT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) WREG32(CB_PERFCOUNTER2_SELECT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) WREG32(CB_PERFCOUNTER3_SELECT0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) WREG32(CB_PERFCOUNTER3_SELECT1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) tmp = RREG32(HDP_MISC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) tmp |= HDP_FLUSH_INVALIDATE_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) WREG32(HDP_MISC_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) * GPU scratch registers helpers function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) static void si_scratch_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) rdev->scratch.num_reg = 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) rdev->scratch.reg_base = SCRATCH_REG0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) for (i = 0; i < rdev->scratch.num_reg; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) rdev->scratch.free[i] = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) void si_fence_ring_emit(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) struct radeon_fence *fence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) struct radeon_ring *ring = &rdev->ring[fence->ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) /* flush read cache over gart */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) PACKET3_TC_ACTION_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) PACKET3_SH_KCACHE_ACTION_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) PACKET3_SH_ICACHE_ACTION_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) radeon_ring_write(ring, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) radeon_ring_write(ring, 10); /* poll interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) /* EVENT_WRITE_EOP - flush caches, send int */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) radeon_ring_write(ring, lower_32_bits(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) radeon_ring_write(ring, fence->seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) * IB stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) struct radeon_ring *ring = &rdev->ring[ib->ring];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) u32 header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) if (ib->is_const_ib) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) /* set switch buffer packet before const IB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) u32 next_rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) if (ring->rptr_save_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) next_rptr = ring->wptr + 3 + 4 + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) radeon_ring_write(ring, ((ring->rptr_save_reg -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) PACKET3_SET_CONFIG_REG_START) >> 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) radeon_ring_write(ring, next_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) } else if (rdev->wb.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) next_rptr = ring->wptr + 5 + 4 + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) radeon_ring_write(ring, (1 << 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) radeon_ring_write(ring, next_rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) radeon_ring_write(ring, header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) radeon_ring_write(ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) (2 << 0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) (ib->gpu_addr & 0xFFFFFFFC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) if (!ib->is_const_ib) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) /* flush read cache over gart for this vmid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) radeon_ring_write(ring, vm_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) PACKET3_TC_ACTION_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) PACKET3_SH_KCACHE_ACTION_ENA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) PACKET3_SH_ICACHE_ACTION_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) radeon_ring_write(ring, 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) radeon_ring_write(ring, 10); /* poll interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) * CP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) static void si_cp_enable(struct radeon_device *rdev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) WREG32(CP_ME_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) WREG32(SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) static int si_cp_load_microcode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) si_cp_enable(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) if (rdev->new_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) const struct gfx_firmware_header_v1_0 *pfp_hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) const struct gfx_firmware_header_v1_0 *ce_hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) const struct gfx_firmware_header_v1_0 *me_hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) const __le32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) u32 fw_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) radeon_ucode_print_gfx_hdr(&ce_hdr->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) radeon_ucode_print_gfx_hdr(&me_hdr->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) /* PFP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) fw_data = (const __le32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) for (i = 0; i < fw_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) /* CE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) fw_data = (const __le32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) WREG32(CP_CE_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) for (i = 0; i < fw_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) WREG32(CP_CE_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) /* ME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) fw_data = (const __be32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) for (i = 0; i < fw_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) const __be32 *fw_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) /* PFP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) fw_data = (const __be32 *)rdev->pfp_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) /* CE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) fw_data = (const __be32 *)rdev->ce_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) WREG32(CP_CE_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) for (i = 0; i < SI_CE_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) WREG32(CP_CE_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) /* ME */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) fw_data = (const __be32 *)rdev->me_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) WREG32(CP_PFP_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) WREG32(CP_CE_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) WREG32(CP_ME_RAM_WADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) WREG32(CP_ME_RAM_RADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) static int si_cp_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) int r, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) r = radeon_ring_lock(rdev, ring, 7 + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) /* init the CP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) radeon_ring_write(ring, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) radeon_ring_write(ring, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) /* init the CE partitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) radeon_ring_write(ring, 0xc000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) radeon_ring_write(ring, 0xe000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) si_cp_enable(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) r = radeon_ring_lock(rdev, ring, si_default_size + 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) /* setup clear context state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) for (i = 0; i < si_default_size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) radeon_ring_write(ring, si_default_state[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) /* set clear context state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) radeon_ring_write(ring, 0x00000316);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) ring = &rdev->ring[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) r = radeon_ring_lock(rdev, ring, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) /* clear the compute context state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) radeon_ring_write(ring, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) radeon_ring_unlock_commit(rdev, ring, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) static void si_cp_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) si_cp_enable(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) radeon_ring_fini(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) radeon_scratch_free(rdev, ring->rptr_save_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) radeon_ring_fini(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) radeon_scratch_free(rdev, ring->rptr_save_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) radeon_ring_fini(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) radeon_scratch_free(rdev, ring->rptr_save_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) static int si_cp_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) u32 rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) si_enable_gui_idle_interrupt(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) WREG32(CP_SEM_WAIT_TIMER, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) /* Set the write pointer delay */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) WREG32(CP_RB_WPTR_DELAY, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) WREG32(CP_DEBUG, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) /* ring 0 - compute and gfx */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) /* Set ring buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) rb_bufsz = order_base_2(ring->ring_size / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) tmp |= BUF_SWAP_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) WREG32(CP_RB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) /* Initialize the ring buffer's read and write pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) ring->wptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) WREG32(CP_RB0_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) /* set the wb address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) WREG32(SCRATCH_UMSK, 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) tmp |= RB_NO_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) WREG32(SCRATCH_UMSK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) WREG32(CP_RB0_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) /* ring1 - compute only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) /* Set ring buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) rb_bufsz = order_base_2(ring->ring_size / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) tmp |= BUF_SWAP_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) WREG32(CP_RB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) /* Initialize the ring buffer's read and write pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) ring->wptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) WREG32(CP_RB1_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) /* set the wb address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) WREG32(CP_RB1_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) /* ring2 - compute only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) /* Set ring buffer size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) rb_bufsz = order_base_2(ring->ring_size / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) #ifdef __BIG_ENDIAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) tmp |= BUF_SWAP_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) WREG32(CP_RB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) /* Initialize the ring buffer's read and write pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) ring->wptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) WREG32(CP_RB2_WPTR, ring->wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) /* set the wb address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) WREG32(CP_RB2_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) /* start the rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) si_cp_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) si_enable_gui_idle_interrupt(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) u32 reset_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) /* GRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) tmp = RREG32(GRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) if (tmp & (PA_BUSY | SC_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) BCI_BUSY | SX_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) TA_BUSY | VGT_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) DB_BUSY | CB_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) GDS_BUSY | SPI_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) IA_BUSY | IA_BUSY_NO_DMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) reset_mask |= RADEON_RESET_GFX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) CP_BUSY | CP_COHERENCY_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) reset_mask |= RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) if (tmp & GRBM_EE_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) /* GRBM_STATUS2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) tmp = RREG32(GRBM_STATUS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) reset_mask |= RADEON_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) /* DMA_STATUS_REG 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (!(tmp & DMA_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) reset_mask |= RADEON_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) /* DMA_STATUS_REG 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) if (!(tmp & DMA_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) reset_mask |= RADEON_RESET_DMA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) /* SRBM_STATUS2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) tmp = RREG32(SRBM_STATUS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) if (tmp & DMA_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) reset_mask |= RADEON_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (tmp & DMA1_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) reset_mask |= RADEON_RESET_DMA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) /* SRBM_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) tmp = RREG32(SRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) if (tmp & IH_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) reset_mask |= RADEON_RESET_IH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) if (tmp & SEM_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) reset_mask |= RADEON_RESET_SEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) if (tmp & GRBM_RQ_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) reset_mask |= RADEON_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) if (tmp & VMC_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) reset_mask |= RADEON_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) MCC_BUSY | MCD_BUSY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) reset_mask |= RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) if (evergreen_is_display_hung(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) reset_mask |= RADEON_RESET_DISPLAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) /* VM_L2_STATUS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) tmp = RREG32(VM_L2_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) if (tmp & L2_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) reset_mask |= RADEON_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) /* Skip MC reset as it's mostly likely not hung, just busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) if (reset_mask & RADEON_RESET_MC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) reset_mask &= ~RADEON_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) return reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) struct evergreen_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) if (reset_mask == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) evergreen_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) /* disable PG/CG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) si_fini_pg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) si_fini_cg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) /* stop the rlc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) si_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) /* Disable CP parsing/prefetching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) if (reset_mask & RADEON_RESET_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) /* dma0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) if (reset_mask & RADEON_RESET_DMA1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) /* dma1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) evergreen_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) if (evergreen_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) grbm_soft_reset = SOFT_RESET_CB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) SOFT_RESET_DB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) SOFT_RESET_GDS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) SOFT_RESET_PA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) SOFT_RESET_SC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) SOFT_RESET_BCI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) SOFT_RESET_SPI |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) SOFT_RESET_SX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) SOFT_RESET_TC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) SOFT_RESET_TA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) SOFT_RESET_VGT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) SOFT_RESET_IA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) if (reset_mask & RADEON_RESET_CP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) srbm_soft_reset |= SOFT_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) if (reset_mask & RADEON_RESET_DMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) srbm_soft_reset |= SOFT_RESET_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) if (reset_mask & RADEON_RESET_DMA1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) srbm_soft_reset |= SOFT_RESET_DMA1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) if (reset_mask & RADEON_RESET_DISPLAY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) srbm_soft_reset |= SOFT_RESET_DC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) if (reset_mask & RADEON_RESET_RLC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) grbm_soft_reset |= SOFT_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) if (reset_mask & RADEON_RESET_SEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) srbm_soft_reset |= SOFT_RESET_SEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) if (reset_mask & RADEON_RESET_IH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) srbm_soft_reset |= SOFT_RESET_IH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) if (reset_mask & RADEON_RESET_GRBM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) srbm_soft_reset |= SOFT_RESET_GRBM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) if (reset_mask & RADEON_RESET_VMC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) srbm_soft_reset |= SOFT_RESET_VMC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) if (reset_mask & RADEON_RESET_MC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) srbm_soft_reset |= SOFT_RESET_MC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) if (grbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) tmp |= grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) tmp &= ~grbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) if (srbm_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) tmp |= srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) tmp &= ~srbm_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) WREG32(SRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) tmp = RREG32(SRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) /* Wait a little for things to settle down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) evergreen_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) evergreen_print_gpu_status_regs(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) static void si_set_clk_bypass_mode(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) u32 tmp, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) tmp = RREG32(CG_SPLL_FUNC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) tmp |= SPLL_BYPASS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) WREG32(CG_SPLL_FUNC_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) tmp |= SPLL_CTLREQ_CHG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) tmp = RREG32(MPLL_CNTL_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) tmp &= ~MPLL_MCLK_SEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) WREG32(MPLL_CNTL_MODE, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) static void si_spll_powerdown(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) tmp = RREG32(SPLL_CNTL_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) tmp |= SPLL_SW_DIR_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) WREG32(SPLL_CNTL_MODE, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) tmp = RREG32(CG_SPLL_FUNC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) tmp |= SPLL_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) WREG32(CG_SPLL_FUNC_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) tmp = RREG32(CG_SPLL_FUNC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) tmp |= SPLL_SLEEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) WREG32(CG_SPLL_FUNC_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) tmp = RREG32(SPLL_CNTL_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) tmp &= ~SPLL_SW_DIR_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) WREG32(SPLL_CNTL_MODE, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) static void si_gpu_pci_config_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) struct evergreen_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) u32 tmp, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) dev_info(rdev->dev, "GPU pci config reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) /* disable dpm? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) /* disable cg/pg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) si_fini_pg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) si_fini_cg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) /* Disable CP parsing/prefetching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) /* dma0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) /* dma1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) tmp &= ~DMA_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) /* XXX other engines? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) /* halt the rlc, disable cp internal ints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) si_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) /* disable mem access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) evergreen_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) if (evergreen_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) /* set mclk/sclk to bypass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) si_set_clk_bypass_mode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) /* powerdown spll */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) si_spll_powerdown(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) /* disable BM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) pci_clear_master(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) /* reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) radeon_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) /* wait for asic to come out of reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) int si_asic_reset(struct radeon_device *rdev, bool hard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) u32 reset_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) if (hard) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) si_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) reset_mask = si_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) if (reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) r600_set_bios_scratch_engine_hung(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) /* try soft reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) si_gpu_soft_reset(rdev, reset_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) reset_mask = si_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) /* try pci config reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) if (reset_mask && radeon_hard_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) si_gpu_pci_config_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) reset_mask = si_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) if (!reset_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) r600_set_bios_scratch_engine_hung(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) * si_gfx_is_lockup - Check if the GFX engine is locked up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) * @ring: radeon_ring structure holding ring information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) * Check if the GFX engine is locked up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) * Returns true if the engine appears to be locked up, false if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) u32 reset_mask = si_gpu_check_soft_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) if (!(reset_mask & (RADEON_RESET_GFX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) RADEON_RESET_COMPUTE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) RADEON_RESET_CP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) radeon_ring_lockup_update(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) return radeon_ring_test_lockup(rdev, ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) /* MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) static void si_mc_program(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) struct evergreen_mc_save save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) /* Initialize HDP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) for (i = 0, j = 0; i < 32; i++, j += 0x18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) WREG32((0x2c14 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) WREG32((0x2c18 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) WREG32((0x2c1c + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) WREG32((0x2c20 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) WREG32((0x2c24 + j), 0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) evergreen_mc_stop(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) if (radeon_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) if (!ASIC_IS_NODCE(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) /* Lockout access through VGA aperture*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) /* Update configuration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) rdev->mc.vram_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) rdev->mc.vram_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) rdev->vram_scratch.gpu_addr >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) WREG32(MC_VM_FB_LOCATION, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) /* XXX double check these! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) WREG32(MC_VM_AGP_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) if (radeon_mc_wait_for_idle(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) evergreen_mc_resume(rdev, &save);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) if (!ASIC_IS_NODCE(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) /* we need to own VRAM, so turn off the VGA renderer here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) * to stop it overwriting our objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) rv515_vga_render_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) void si_vram_gtt_location(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) struct radeon_mc *mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) if (mc->mc_vram_size > 0xFFC0000000ULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) /* leave room for at least 1024M GTT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) dev_warn(rdev->dev, "limiting VRAM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) mc->real_vram_size = 0xFFC0000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) mc->mc_vram_size = 0xFFC0000000ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) radeon_vram_location(rdev, &rdev->mc, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) rdev->mc.gtt_base_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) radeon_gtt_location(rdev, mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) static int si_mc_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) int chansize, numchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) /* Get VRAM informations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) rdev->mc.vram_is_ddr = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) tmp = RREG32(MC_ARB_RAMCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) if (tmp & CHANSIZE_OVERRIDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) chansize = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) } else if (tmp & CHANSIZE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) chansize = 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) chansize = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) tmp = RREG32(MC_SHARED_CHMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) numchan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) numchan = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) numchan = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) numchan = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) numchan = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) case 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) numchan = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) case 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) numchan = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) case 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) numchan = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) numchan = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) rdev->mc.vram_width = numchan * chansize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) /* Could aper size report 0 ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) /* size in MB on si */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) tmp = RREG32(CONFIG_MEMSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) /* some boards may have garbage in the upper 16 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) if (tmp & 0xffff0000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) if (tmp & 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) tmp &= 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) rdev->mc.visible_vram_size = rdev->mc.aper_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) si_vram_gtt_location(rdev, &rdev->mc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) radeon_update_bandwidth_info(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) * GART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) /* flush hdp cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) /* bits 0-15 are the VM contexts0-15 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) WREG32(VM_INVALIDATE_REQUEST, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) static int si_pcie_gart_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) int r, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) if (rdev->gart.robj == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) r = radeon_gart_table_vram_pin(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) /* Setup TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) WREG32(MC_VM_MX_L1_TLB_CNTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) (0xA << 7) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) ENABLE_L1_TLB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) ENABLE_L1_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) SYSTEM_ACCESS_MODE_NOT_IN_SYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) ENABLE_ADVANCED_DRIVER_MODEL |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) /* Setup L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) ENABLE_L2_FRAGMENT_PROCESSING |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) EFFECTIVE_L2_QUEUE_SIZE(7) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) CONTEXT1_IDENTITY_ACCESS_MODE(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) BANK_SELECT(4) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) L2_CACHE_BIGK_FRAGMENT_SIZE(4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) /* setup context0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) (u32)(rdev->dummy_page.addr >> 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) WREG32(VM_CONTEXT0_CNTL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) WREG32(0x15D4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) WREG32(0x15D8, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) WREG32(0x15DC, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) /* empty context1-15 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) /* set vm size, must be a multiple of 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) /* Assign the pt base to something valid for now; the pts used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) * the VMs are determined by the application and setup and assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) * on the fly in the vm part of radeon_gart.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) for (i = 1; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) if (i < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) rdev->vm_manager.saved_table_addr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) rdev->vm_manager.saved_table_addr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) /* enable context1-15 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) (u32)(rdev->dummy_page.addr >> 12));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) WREG32(VM_CONTEXT1_CNTL2, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) READ_PROTECTION_FAULT_ENABLE_DEFAULT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) si_pcie_gart_tlb_flush(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) (unsigned)(rdev->mc.gtt_size >> 20),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) (unsigned long long)rdev->gart.table_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) rdev->gart.ready = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) static void si_pcie_gart_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) for (i = 1; i < 16; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) uint32_t reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) if (i < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) /* Disable all tables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) WREG32(VM_CONTEXT0_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) WREG32(VM_CONTEXT1_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) /* Setup TLB control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) /* Setup L2 cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) EFFECTIVE_L2_QUEUE_SIZE(7) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) CONTEXT1_IDENTITY_ACCESS_MODE(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) WREG32(VM_L2_CNTL2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) L2_CACHE_BIGK_FRAGMENT_SIZE(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) radeon_gart_table_vram_unpin(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) static void si_pcie_gart_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) si_pcie_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) radeon_gart_table_vram_free(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) radeon_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) /* vm parser */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) static bool si_vm_reg_valid(u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) /* context regs are fine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) if (reg >= 0x28000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) /* shader regs are also fine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) if (reg >= 0xB000 && reg < 0xC000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) /* check config regs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) switch (reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) case GRBM_GFX_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) case CP_STRMOUT_CNTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) case VGT_VTX_VECT_EJECT_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) case VGT_CACHE_INVALIDATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) case VGT_ESGS_RING_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) case VGT_GSVS_RING_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) case VGT_GS_VERTEX_REUSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) case VGT_PRIMITIVE_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) case VGT_INDEX_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) case VGT_NUM_INDICES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) case VGT_NUM_INSTANCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) case VGT_TF_RING_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) case VGT_HS_OFFCHIP_PARAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) case VGT_TF_MEMORY_BASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) case PA_CL_ENHANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) case PA_SU_LINE_STIPPLE_VALUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) case PA_SC_LINE_STIPPLE_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) case PA_SC_ENHANCE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) case SQC_CACHES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) case SPI_STATIC_THREAD_MGMT_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) case SPI_STATIC_THREAD_MGMT_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) case SPI_STATIC_THREAD_MGMT_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) case SPI_PS_MAX_WAVE_ID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) case SPI_CONFIG_CNTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) case SPI_CONFIG_CNTL_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) case TA_CNTL_AUX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) case TA_CS_BC_BASE_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) DRM_ERROR("Invalid register 0x%x in CS\n", reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) static int si_vm_packet3_ce_check(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) u32 *ib, struct radeon_cs_packet *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) switch (pkt->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) case PACKET3_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) case PACKET3_SET_BASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) case PACKET3_SET_CE_DE_COUNTERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) case PACKET3_LOAD_CONST_RAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) case PACKET3_WRITE_CONST_RAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) case PACKET3_WRITE_CONST_RAM_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) case PACKET3_DUMP_CONST_RAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) case PACKET3_INCREMENT_CE_COUNTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) case PACKET3_WAIT_ON_DE_COUNTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) case PACKET3_CE_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) u32 start_reg, reg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) u32 command = ib[idx + 4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) u32 info = ib[idx + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) u32 idx_value = ib[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) if (command & PACKET3_CP_DMA_CMD_SAS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) /* src address space is register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) if (((info & 0x60000000) >> 29) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) start_reg = idx_value << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) if (command & PACKET3_CP_DMA_CMD_SAIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) reg = start_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) if (!si_vm_reg_valid(reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) DRM_ERROR("CP DMA Bad SRC register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) for (i = 0; i < (command & 0x1fffff); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) reg = start_reg + (4 * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) if (!si_vm_reg_valid(reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) DRM_ERROR("CP DMA Bad SRC register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) if (command & PACKET3_CP_DMA_CMD_DAS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) /* dst address space is register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) if (((info & 0x00300000) >> 20) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) start_reg = ib[idx + 2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) if (command & PACKET3_CP_DMA_CMD_DAIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) reg = start_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) if (!si_vm_reg_valid(reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) DRM_ERROR("CP DMA Bad DST register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) for (i = 0; i < (command & 0x1fffff); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) reg = start_reg + (4 * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) if (!si_vm_reg_valid(reg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) DRM_ERROR("CP DMA Bad DST register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) u32 *ib, struct radeon_cs_packet *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) u32 idx = pkt->idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) u32 idx_value = ib[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) u32 start_reg, end_reg, reg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) switch (pkt->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) case PACKET3_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) case PACKET3_SET_BASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) case PACKET3_CLEAR_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) case PACKET3_INDEX_BUFFER_SIZE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) case PACKET3_DISPATCH_DIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) case PACKET3_DISPATCH_INDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) case PACKET3_ALLOC_GDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) case PACKET3_WRITE_GDS_RAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) case PACKET3_ATOMIC_GDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) case PACKET3_ATOMIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) case PACKET3_OCCLUSION_QUERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) case PACKET3_SET_PREDICATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) case PACKET3_COND_EXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) case PACKET3_PRED_EXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) case PACKET3_DRAW_INDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) case PACKET3_DRAW_INDEX_INDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) case PACKET3_INDEX_BASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) case PACKET3_DRAW_INDEX_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) case PACKET3_CONTEXT_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) case PACKET3_INDEX_TYPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) case PACKET3_DRAW_INDIRECT_MULTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) case PACKET3_DRAW_INDEX_AUTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) case PACKET3_DRAW_INDEX_IMMD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) case PACKET3_NUM_INSTANCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) case PACKET3_DRAW_INDEX_MULTI_AUTO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) case PACKET3_STRMOUT_BUFFER_UPDATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) case PACKET3_DRAW_INDEX_OFFSET_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) case PACKET3_MPEG_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) case PACKET3_WAIT_REG_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) case PACKET3_MEM_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) case PACKET3_PFP_SYNC_ME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) case PACKET3_SURFACE_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) case PACKET3_EVENT_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) case PACKET3_EVENT_WRITE_EOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) case PACKET3_EVENT_WRITE_EOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) case PACKET3_SET_CONTEXT_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) case PACKET3_SET_CONTEXT_REG_INDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) case PACKET3_SET_SH_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) case PACKET3_SET_SH_REG_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) case PACKET3_INCREMENT_DE_COUNTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) case PACKET3_WAIT_ON_CE_COUNTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) case PACKET3_WAIT_ON_AVAIL_BUFFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) case PACKET3_ME_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) case PACKET3_COPY_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) if ((idx_value & 0xf00) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) reg = ib[idx + 3] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) case PACKET3_WRITE_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) if ((idx_value & 0xf00) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) start_reg = ib[idx + 1] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) if (idx_value & 0x10000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) if (!si_vm_reg_valid(start_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) for (i = 0; i < (pkt->count - 2); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) reg = start_reg + (4 * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) case PACKET3_COND_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) if (idx_value & 0x100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) reg = ib[idx + 5] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) case PACKET3_COPY_DW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) if (idx_value & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) reg = ib[idx + 3] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) case PACKET3_SET_CONFIG_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) end_reg = 4 * pkt->count + start_reg - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) for (i = 0; i < pkt->count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) reg = start_reg + (4 * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) case PACKET3_CP_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) r = si_vm_packet3_cp_dma_check(ib, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) static int si_vm_packet3_compute_check(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) u32 *ib, struct radeon_cs_packet *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) u32 idx = pkt->idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) u32 idx_value = ib[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) u32 start_reg, reg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) switch (pkt->opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) case PACKET3_NOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) case PACKET3_SET_BASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) case PACKET3_CLEAR_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) case PACKET3_DISPATCH_DIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) case PACKET3_DISPATCH_INDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) case PACKET3_ALLOC_GDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) case PACKET3_WRITE_GDS_RAM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) case PACKET3_ATOMIC_GDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) case PACKET3_ATOMIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) case PACKET3_OCCLUSION_QUERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) case PACKET3_SET_PREDICATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) case PACKET3_COND_EXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) case PACKET3_PRED_EXEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) case PACKET3_CONTEXT_CONTROL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) case PACKET3_STRMOUT_BUFFER_UPDATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) case PACKET3_WAIT_REG_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) case PACKET3_MEM_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) case PACKET3_PFP_SYNC_ME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) case PACKET3_SURFACE_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) case PACKET3_EVENT_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) case PACKET3_EVENT_WRITE_EOP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) case PACKET3_EVENT_WRITE_EOS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) case PACKET3_SET_CONTEXT_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) case PACKET3_SET_CONTEXT_REG_INDIRECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) case PACKET3_SET_SH_REG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) case PACKET3_SET_SH_REG_OFFSET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) case PACKET3_INCREMENT_DE_COUNTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) case PACKET3_WAIT_ON_CE_COUNTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) case PACKET3_WAIT_ON_AVAIL_BUFFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) case PACKET3_ME_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) case PACKET3_COPY_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) if ((idx_value & 0xf00) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) reg = ib[idx + 3] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) case PACKET3_WRITE_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) if ((idx_value & 0xf00) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) start_reg = ib[idx + 1] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) if (idx_value & 0x10000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) if (!si_vm_reg_valid(start_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) for (i = 0; i < (pkt->count - 2); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) reg = start_reg + (4 * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) case PACKET3_COND_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) if (idx_value & 0x100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) reg = ib[idx + 5] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) case PACKET3_COPY_DW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) if (idx_value & 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) reg = ib[idx + 3] * 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) if (!si_vm_reg_valid(reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) case PACKET3_CP_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) r = si_vm_packet3_cp_dma_check(ib, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) u32 idx = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) struct radeon_cs_packet pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) pkt.idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) pkt.one_reg_wr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) switch (pkt.type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) case RADEON_PACKET_TYPE0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) dev_err(rdev->dev, "Packet0 not allowed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) case RADEON_PACKET_TYPE2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) idx += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) case RADEON_PACKET_TYPE3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) if (ib->is_const_ib)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) switch (ib->ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) case RADEON_RING_TYPE_GFX_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) case CAYMAN_RING_TYPE_CP1_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) case CAYMAN_RING_TYPE_CP2_INDEX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) idx += pkt.count + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) for (i = 0; i < ib->length_dw; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) if (i == idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) printk("\t0x%08x <---\n", ib->ptr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) printk("\t0x%08x\n", ib->ptr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) } while (idx < ib->length_dw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) * vm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) */
/**
 * si_vm_init - initialize the VM manager parameters (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Sets the number of hardware VM contexts and the VRAM base offset.
 * No resources are allocated here.
 *
 * Returns 0 (always succeeds).
 */
int si_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 16;
	/* base offset of vram pages */
	rdev->vm_manager.vram_base_offset = 0;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807)
/**
 * si_vm_fini - tear down VM manager state (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Intentionally empty: si_vm_init() allocates nothing, so there is
 * nothing to release.
 */
void si_vm_fini(struct radeon_device *rdev)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811)
/**
 * si_vm_decode_fault - print human readable fault info
 *
 * @rdev: radeon_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 *
 * Print human readable fault information (SI).
 */
static void si_vm_decode_fault(struct radeon_device *rdev,
			       u32 status, u32 addr)
{
	/* unpack the fields of the protection fault status register */
	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
	char *block;

	/* Map the raw memory-client ID to the hardware block name.
	 * Tahiti uses a different client-ID assignment than the other
	 * SI parts, hence the two lookup tables. */
	if (rdev->family == CHIP_TAHITI) {
		switch (mc_id) {
		case 160:
		case 144:
		case 96:
		case 80:
		case 224:
		case 208:
		case 32:
		case 16:
			block = "CB";
			break;
		case 161:
		case 145:
		case 97:
		case 81:
		case 225:
		case 209:
		case 33:
		case 17:
			block = "CB_FMASK";
			break;
		case 162:
		case 146:
		case 98:
		case 82:
		case 226:
		case 210:
		case 34:
		case 18:
			block = "CB_CMASK";
			break;
		case 163:
		case 147:
		case 99:
		case 83:
		case 227:
		case 211:
		case 35:
		case 19:
			block = "CB_IMMED";
			break;
		case 164:
		case 148:
		case 100:
		case 84:
		case 228:
		case 212:
		case 36:
		case 20:
			block = "DB";
			break;
		case 165:
		case 149:
		case 101:
		case 85:
		case 229:
		case 213:
		case 37:
		case 21:
			block = "DB_HTILE";
			break;
		case 167:
		case 151:
		case 103:
		case 87:
		case 231:
		case 215:
		case 39:
		case 23:
			block = "DB_STEN";
			break;
		case 72:
		case 68:
		case 64:
		case 8:
		case 4:
		case 0:
		case 136:
		case 132:
		case 128:
		case 200:
		case 196:
		case 192:
			block = "TC";
			break;
		case 112:
		case 48:
			block = "CP";
			break;
		case 49:
		case 177:
		case 50:
		case 178:
			block = "SH";
			break;
		case 53:
		case 190:
			block = "VGT";
			break;
		case 117:
			block = "IH";
			break;
		case 51:
		case 115:
			block = "RLC";
			break;
		case 119:
		case 183:
			block = "DMA0";
			break;
		case 61:
			block = "DMA1";
			break;
		case 248:
		case 120:
			block = "HDP";
			break;
		default:
			block = "unknown";
			break;
		}
	} else {
		/* Pitcairn/Verde/Oland/Hainan client-ID layout */
		switch (mc_id) {
		case 32:
		case 16:
		case 96:
		case 80:
		case 160:
		case 144:
		case 224:
		case 208:
			block = "CB";
			break;
		case 33:
		case 17:
		case 97:
		case 81:
		case 161:
		case 145:
		case 225:
		case 209:
			block = "CB_FMASK";
			break;
		case 34:
		case 18:
		case 98:
		case 82:
		case 162:
		case 146:
		case 226:
		case 210:
			block = "CB_CMASK";
			break;
		case 35:
		case 19:
		case 99:
		case 83:
		case 163:
		case 147:
		case 227:
		case 211:
			block = "CB_IMMED";
			break;
		case 36:
		case 20:
		case 100:
		case 84:
		case 164:
		case 148:
		case 228:
		case 212:
			block = "DB";
			break;
		case 37:
		case 21:
		case 101:
		case 85:
		case 165:
		case 149:
		case 229:
		case 213:
			block = "DB_HTILE";
			break;
		case 39:
		case 23:
		case 103:
		case 87:
		case 167:
		case 151:
		case 231:
		case 215:
			block = "DB_STEN";
			break;
		case 72:
		case 68:
		case 8:
		case 4:
		case 136:
		case 132:
		case 200:
		case 196:
			block = "TC";
			break;
		case 112:
		case 48:
			block = "CP";
			break;
		case 49:
		case 177:
		case 50:
		case 178:
			block = "SH";
			break;
		case 53:
			block = "VGT";
			break;
		case 117:
			block = "IH";
			break;
		case 51:
		case 115:
			block = "RLC";
			break;
		case 119:
		case 183:
			block = "DMA0";
			break;
		case 61:
			block = "DMA1";
			break;
		case 248:
		case 120:
			block = "HDP";
			break;
		default:
			block = "unknown";
			break;
		}
	}

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
	       protections, vmid, addr,
	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_id);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075)
/**
 * si_vm_flush - update the page table base and flush the TLB (SI)
 *
 * @rdev: radeon_device pointer
 * @ring: ring to emit the flush packets on
 * @vm_id: VM context id (0-15)
 * @pd_addr: physical address of the new page directory
 *
 * Emits PM4 WRITE_DATA packets that program the page table base
 * address for @vm_id, flush the HDP cache, request a TLB invalidate
 * for that context and then wait for the invalidate to complete.
 * Packet order matters here; do not reorder the writes.
 */
void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		 unsigned vm_id, uint64_t pd_addr)
{
	/* write new base address */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));

	/* contexts 0-7 and 8-15 live in two separate register banks */
	if (vm_id < 8) {
		radeon_ring_write(ring,
				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
	} else {
		radeon_ring_write(ring,
				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
	}
	radeon_ring_write(ring, 0);
	/* the register takes a page-aligned address, shifted by 12 */
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for the invalidate to complete */
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0); /* ref */
	radeon_ring_write(ring, 0); /* mask */
	radeon_ring_write(ring, 0x20); /* poll interval */

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) * Power and clock gating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) u32 tmp = RREG32(CP_INT_CNTL_RING0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) WREG32(CP_INT_CNTL_RING0, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) if (!enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) /* read a gfx register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) tmp = RREG32(DB_DEPTH_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) static void si_set_uvd_dcm(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) bool sw_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) u32 tmp, tmp2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) tmp = RREG32(UVD_CGC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) tmp |= DCM | CG_DT(1) | CLK_OD(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) if (sw_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) tmp &= ~0x7ffff800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) tmp |= 0x7ffff800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) tmp2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) WREG32(UVD_CGC_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) void si_init_uvd_internal_cg(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) bool hw_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) if (hw_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) si_set_uvd_dcm(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) u32 tmp = RREG32(UVD_CGC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) tmp &= ~DCM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) WREG32(UVD_CGC_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) static u32 si_halt_rlc(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) u32 data, orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) orig = data = RREG32(RLC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) if (data & RLC_ENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) data &= ~RLC_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) WREG32(RLC_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) si_wait_for_rlc_serdes(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) return orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) tmp = RREG32(RLC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) if (tmp != rlc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) WREG32(RLC_CNTL, rlc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) u32 data, orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) orig = data = RREG32(DMA_PG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) data |= PG_CNTL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) data &= ~PG_CNTL_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) WREG32(DMA_PG, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) static void si_init_dma_pg(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) WREG32(DMA_PGFSM_WRITE, 0x00002000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) for (tmp = 0; tmp < 5; tmp++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) WREG32(DMA_PGFSM_WRITE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) static void si_enable_gfx_cgpg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) WREG32(RLC_TTOP_D, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) tmp = RREG32(RLC_PG_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) tmp |= GFX_PG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) WREG32(RLC_PG_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) tmp = RREG32(RLC_AUTO_PG_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) tmp |= AUTO_PG_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) WREG32(RLC_AUTO_PG_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) tmp = RREG32(RLC_AUTO_PG_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) tmp &= ~AUTO_PG_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) WREG32(RLC_AUTO_PG_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) tmp = RREG32(DB_RENDER_CONTROL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) static void si_init_gfx_cgpg(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) tmp = RREG32(RLC_PG_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) tmp |= GFX_PG_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) WREG32(RLC_PG_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) tmp = RREG32(RLC_AUTO_PG_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) tmp &= ~GRBM_REG_SGIT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) tmp |= GRBM_REG_SGIT(0x700);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) WREG32(RLC_AUTO_PG_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) u32 mask = 0, tmp, tmp1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) si_select_se_sh(rdev, se, sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) tmp &= 0xffff0000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) tmp |= tmp1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) tmp >>= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) mask <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) mask |= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) return (~tmp) & mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) static void si_init_ao_cu_mask(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) u32 i, j, k, active_cu_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) u32 mask, counter, cu_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) u32 tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) mask = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) cu_bitmap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) if (counter < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) cu_bitmap |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) mask <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) active_cu_number += counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) tmp |= (cu_bitmap << (i * 16 + j * 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) WREG32(RLC_PG_AO_CU_MASK, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) tmp = RREG32(RLC_MAX_PG_CU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) tmp &= ~MAX_PU_CU_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) tmp |= MAX_PU_CU(active_cu_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) WREG32(RLC_MAX_PG_CU, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) static void si_enable_cgcg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) u32 data, orig, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) si_enable_gui_idle_interrupt(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) tmp = si_halt_rlc(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) si_wait_for_rlc_serdes(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) si_update_rlc(rdev, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) data |= CGCG_EN | CGLS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) si_enable_gui_idle_interrupt(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) RREG32(CB_CGTT_SCLK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) RREG32(CB_CGTT_SCLK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) RREG32(CB_CGTT_SCLK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) RREG32(CB_CGTT_SCLK_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) data &= ~(CGCG_EN | CGLS_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) WREG32(RLC_CGCG_CGLS_CTRL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) static void si_enable_mgcg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) u32 data, orig, tmp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) orig = data = RREG32(CGTS_SM_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) data = 0x96940200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) WREG32(CGTS_SM_CTRL_REG, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) orig = data = RREG32(CP_MEM_SLP_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) data |= CP_MEM_LS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) WREG32(CP_MEM_SLP_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) data &= 0xffffffc0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) tmp = si_halt_rlc(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) si_update_rlc(rdev, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) data |= 0x00000003;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) data = RREG32(CP_MEM_SLP_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) if (data & CP_MEM_LS_EN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) data &= ~CP_MEM_LS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) WREG32(CP_MEM_SLP_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) orig = data = RREG32(CGTS_SM_CTRL_REG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) data |= LS_OVERRIDE | OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) WREG32(CGTS_SM_CTRL_REG, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) tmp = si_halt_rlc(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) si_update_rlc(rdev, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) static void si_enable_uvd_mgcg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) u32 orig, data, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) tmp |= 0x3fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) orig = data = RREG32(UVD_CGC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) data |= DCM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) WREG32(UVD_CGC_CTRL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) tmp &= ~0x3fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) orig = data = RREG32(UVD_CGC_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) data &= ~DCM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) WREG32(UVD_CGC_CTRL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) static const u32 mc_cg_registers[] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) MC_HUB_MISC_HUB_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) MC_HUB_MISC_SIP_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) MC_HUB_MISC_VM_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) MC_XPB_CLK_GAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) ATC_MISC_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) MC_CITF_MISC_WR_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) MC_CITF_MISC_RD_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) MC_CITF_MISC_VM_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) VM_L2_CG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) static void si_enable_mc_ls(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) u32 orig, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) orig = data = RREG32(mc_cg_registers[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) data |= MC_LS_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) data &= ~MC_LS_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) WREG32(mc_cg_registers[i], data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) static void si_enable_mc_mgcg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) u32 orig, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) orig = data = RREG32(mc_cg_registers[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) data |= MC_CG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) data &= ~MC_CG_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) WREG32(mc_cg_registers[i], data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) static void si_enable_dma_mgcg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) u32 orig, data, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) offset = DMA0_REGISTER_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) offset = DMA1_REGISTER_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) orig = data = RREG32(DMA_POWER_CNTL + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) data &= ~MEM_POWER_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) WREG32(DMA_POWER_CNTL + offset, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) WREG32(DMA_CLK_CTRL + offset, 0x00000100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) offset = DMA0_REGISTER_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) offset = DMA1_REGISTER_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) orig = data = RREG32(DMA_POWER_CNTL + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) data |= MEM_POWER_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) WREG32(DMA_POWER_CNTL + offset, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) orig = data = RREG32(DMA_CLK_CTRL + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) data = 0xff000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) if (data != orig)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) WREG32(DMA_CLK_CTRL + offset, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) static void si_enable_bif_mgls(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) u32 orig, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) orig = data = RREG32_PCIE(PCIE_CNTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) WREG32_PCIE(PCIE_CNTL2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) static void si_enable_hdp_mgcg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) u32 orig, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) orig = data = RREG32(HDP_HOST_PATH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) data &= ~CLOCK_GATING_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) data |= CLOCK_GATING_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) WREG32(HDP_HOST_PATH_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) static void si_enable_hdp_ls(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) u32 orig, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) orig = data = RREG32(HDP_MEM_POWER_LS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) data |= HDP_LS_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) data &= ~HDP_LS_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) WREG32(HDP_MEM_POWER_LS, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) static void si_update_cg(struct radeon_device *rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) u32 block, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) if (block & RADEON_CG_BLOCK_GFX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) si_enable_gui_idle_interrupt(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) /* order matters! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) if (enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) si_enable_mgcg(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) si_enable_cgcg(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) si_enable_cgcg(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) si_enable_mgcg(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) si_enable_gui_idle_interrupt(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) if (block & RADEON_CG_BLOCK_MC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) si_enable_mc_mgcg(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) si_enable_mc_ls(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) if (block & RADEON_CG_BLOCK_SDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) si_enable_dma_mgcg(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) if (block & RADEON_CG_BLOCK_BIF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) si_enable_bif_mgls(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) if (block & RADEON_CG_BLOCK_UVD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) si_enable_uvd_mgcg(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) if (block & RADEON_CG_BLOCK_HDP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) si_enable_hdp_mgcg(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) si_enable_hdp_ls(rdev, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) static void si_init_cg(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) RADEON_CG_BLOCK_MC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) RADEON_CG_BLOCK_SDMA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) RADEON_CG_BLOCK_BIF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) RADEON_CG_BLOCK_HDP), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) si_init_uvd_internal_cg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) static void si_fini_cg(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) RADEON_CG_BLOCK_MC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) RADEON_CG_BLOCK_SDMA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) RADEON_CG_BLOCK_BIF |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) RADEON_CG_BLOCK_HDP), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) u32 si_get_csb_size(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) u32 count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) const struct cs_section_def *sect = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) const struct cs_extent_def *ext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) if (rdev->rlc.cs_data == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) /* begin clear state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) count += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) /* context control state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) count += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) for (ext = sect->section; ext->extent != NULL; ++ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) if (sect->id == SECT_CONTEXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) count += 2 + ext->reg_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) /* pa_sc_raster_config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) count += 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) /* end clear state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) count += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) /* clear state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) count += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) u32 count = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) const struct cs_section_def *sect = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) const struct cs_extent_def *ext = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) if (rdev->rlc.cs_data == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) if (buffer == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) buffer[count++] = cpu_to_le32(0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) buffer[count++] = cpu_to_le32(0x80000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) for (ext = sect->section; ext->extent != NULL; ++ext) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) if (sect->id == SECT_CONTEXT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) buffer[count++] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) for (i = 0; i < ext->reg_count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) buffer[count++] = cpu_to_le32(ext->extent[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) switch (rdev->family) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) case CHIP_TAHITI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) case CHIP_PITCAIRN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) buffer[count++] = cpu_to_le32(0x2a00126a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) case CHIP_VERDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) buffer[count++] = cpu_to_le32(0x0000124a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) case CHIP_OLAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) buffer[count++] = cpu_to_le32(0x00000082);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) case CHIP_HAINAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) buffer[count++] = cpu_to_le32(0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) buffer[count++] = cpu_to_le32(0x00000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) buffer[count++] = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) static void si_init_pg(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) if (rdev->pg_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) si_init_dma_pg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) si_init_ao_cu_mask(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) si_init_gfx_cgpg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) si_enable_dma_pg(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) si_enable_gfx_cgpg(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) static void si_fini_pg(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) if (rdev->pg_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) si_enable_dma_pg(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) si_enable_gfx_cgpg(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) * RLC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) void si_rlc_reset(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) u32 tmp = RREG32(GRBM_SOFT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) tmp |= SOFT_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) tmp &= ~SOFT_RESET_RLC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) WREG32(GRBM_SOFT_RESET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) static void si_rlc_stop(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) WREG32(RLC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) si_enable_gui_idle_interrupt(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) si_wait_for_rlc_serdes(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) static void si_rlc_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) WREG32(RLC_CNTL, RLC_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) si_enable_gui_idle_interrupt(rdev, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) udelay(50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) static bool si_lbpw_supported(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) /* Enable LBPW only for DDR3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) tmp = RREG32(MC_SEQ_MISC0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) if ((tmp & 0xF0000000) == 0xB0000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) tmp = RREG32(RLC_LB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) tmp |= LOAD_BALANCE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) tmp &= ~LOAD_BALANCE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) WREG32(RLC_LB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) if (!enable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) WREG32(SPI_LB_CU_MASK, 0x00ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) static int si_rlc_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) if (!rdev->rlc_fw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) si_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) si_rlc_reset(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) si_init_pg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) si_init_cg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) WREG32(RLC_RL_BASE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) WREG32(RLC_RL_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) WREG32(RLC_LB_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) WREG32(RLC_LB_CNTR_INIT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) WREG32(RLC_MC_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) WREG32(RLC_UCODE_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) if (rdev->new_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) const struct rlc_firmware_header_v1_0 *hdr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) const __le32 *fw_data = (const __le32 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) radeon_ucode_print_rlc_hdr(&hdr->header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) for (i = 0; i < fw_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) WREG32(RLC_UCODE_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) const __be32 *fw_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) (const __be32 *)rdev->rlc_fw->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) WREG32(RLC_UCODE_ADDR, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) WREG32(RLC_UCODE_ADDR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) si_enable_lbpw(rdev, si_lbpw_supported(rdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) si_rlc_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) static void si_enable_interrupts(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) u32 ih_cntl = RREG32(IH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) ih_cntl |= ENABLE_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) ih_rb_cntl |= IH_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) WREG32(IH_CNTL, ih_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) WREG32(IH_RB_CNTL, ih_rb_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) rdev->ih.enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) static void si_disable_interrupts(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) u32 ih_cntl = RREG32(IH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) ih_rb_cntl &= ~IH_RB_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) ih_cntl &= ~ENABLE_INTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) WREG32(IH_RB_CNTL, ih_rb_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) WREG32(IH_CNTL, ih_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) /* set rptr, wptr to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) WREG32(IH_RB_RPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) WREG32(IH_RB_WPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) rdev->ih.enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) rdev->ih.rptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) static void si_disable_interrupt_state(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) tmp = RREG32(CP_INT_CNTL_RING0) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) WREG32(CP_INT_CNTL_RING0, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) WREG32(CP_INT_CNTL_RING1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) WREG32(CP_INT_CNTL_RING2, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) WREG32(GRBM_INT_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) WREG32(SRBM_INT_CNTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) for (i = 0; i < rdev->num_crtc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) WREG32(INT_MASK + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) for (i = 0; i < rdev->num_crtc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) WREG32(GRPH_INT_CONTROL + crtc_offsets[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) if (!ASIC_IS_NODCE(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) for (i = 0; i < 6; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) WREG32_AND(DC_HPDx_INT_CONTROL(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) DC_HPDx_INT_POLARITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) static int si_irq_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) int rb_bufsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) /* allocate ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) ret = r600_ih_ring_alloc(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) /* disable irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) si_disable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) /* init rlc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) ret = si_rlc_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) r600_ih_ring_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) /* setup interrupt control */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) /* set dummy read address to dummy page address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) interrupt_cntl = RREG32(INTERRUPT_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) WREG32(INTERRUPT_CNTL, interrupt_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) IH_WPTR_OVERFLOW_CLEAR |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) (rb_bufsz << 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) /* set the writeback address whether it's enabled or not */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) WREG32(IH_RB_CNTL, ih_rb_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) /* set rptr, wptr to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) WREG32(IH_RB_RPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) WREG32(IH_RB_WPTR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) /* Default settings for IH_CNTL (disabled at first) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) /* RPTR_REARM only works if msi's are enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) if (rdev->msi_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) ih_cntl |= RPTR_REARM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) WREG32(IH_CNTL, ih_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) /* force the active interrupt state to all disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) si_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) pci_set_master(rdev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) /* enable irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) si_enable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) /* The order we write back each register here is important */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) int si_irq_set(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) u32 cp_int_cntl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) u32 grbm_int_cntl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) u32 dma_cntl, dma_cntl1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) u32 thermal_int = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) /* don't enable anything if the ih is disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) if (!rdev->ih.enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) si_disable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) /* force the active interrupt state to all disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) si_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) thermal_int = RREG32(CG_THERMAL_INT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) /* enable CP interrupts on all rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) DRM_DEBUG("si_irq_set: sw int gfx\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) cp_int_cntl |= TIME_STAMP_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) DRM_DEBUG("si_irq_set: sw int cp1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) DRM_DEBUG("si_irq_set: sw int cp2\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) DRM_DEBUG("si_irq_set: sw int dma\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) dma_cntl |= TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) DRM_DEBUG("si_irq_set: sw int dma1\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) dma_cntl1 |= TRAP_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) WREG32(GRBM_INT_CNTL, grbm_int_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) if (rdev->irq.dpm_thermal) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) DRM_DEBUG("dpm thermal\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) for (i = 0; i < rdev->num_crtc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) radeon_irq_kms_set_irq_n_enabled(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) rdev, INT_MASK + crtc_offsets[i], VBLANK_INT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) rdev->irq.crtc_vblank_int[i] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) atomic_read(&rdev->irq.pflip[i]), "vblank", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) for (i = 0; i < rdev->num_crtc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) WREG32(GRPH_INT_CONTROL + crtc_offsets[i], GRPH_PFLIP_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) if (!ASIC_IS_NODCE(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) radeon_irq_kms_set_irq_n_enabled(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) rdev, DC_HPDx_INT_CONTROL(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) rdev->irq.hpd[i], "HPD", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) WREG32(CG_THERMAL_INT, thermal_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) /* posting read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) RREG32(SRBM_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) /* The order we write back each register here is important */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) static inline void si_irq_ack(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) u32 *grph_int = rdev->irq.stat_regs.evergreen.grph_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) if (ASIC_IS_NODCE(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) disp_int[i] = RREG32(si_disp_int_status[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) if (i < rdev->num_crtc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) grph_int[i] = RREG32(GRPH_INT_STATUS + crtc_offsets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) /* We write back each interrupt register in pairs of two */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) for (i = 0; i < rdev->num_crtc; i += 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) for (j = i; j < (i + 2); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) if (grph_int[j] & GRPH_PFLIP_INT_OCCURRED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) WREG32(GRPH_INT_STATUS + crtc_offsets[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) GRPH_PFLIP_INT_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) for (j = i; j < (i + 2); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) if (disp_int[j] & LB_D1_VBLANK_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) WREG32(VBLANK_STATUS + crtc_offsets[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) VBLANK_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) if (disp_int[j] & LB_D1_VLINE_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) WREG32(VLINE_STATUS + crtc_offsets[j],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) VLINE_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) if (disp_int[i] & DC_HPD1_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_INT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) for (i = 0; i < 6; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) if (disp_int[i] & DC_HPD1_RX_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) WREG32_OR(DC_HPDx_INT_CONTROL(i), DC_HPDx_RX_INT_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) static void si_irq_disable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) si_disable_interrupts(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) /* Wait and acknowledge irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) si_irq_ack(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) si_disable_interrupt_state(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) static void si_irq_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) si_irq_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) si_rlc_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) static void si_irq_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) si_irq_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) r600_ih_ring_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) u32 wptr, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) if (rdev->wb.enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) wptr = RREG32(IH_RB_WPTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) if (wptr & RB_OVERFLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) wptr &= ~RB_OVERFLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) /* When a ring buffer overflow happen start parsing interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) * from the last not overwritten vector (wptr + 16). Hopefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) * this should allow us to catchup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) tmp = RREG32(IH_RB_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) tmp |= IH_WPTR_OVERFLOW_CLEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) WREG32(IH_RB_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) return (wptr & rdev->ih.ptr_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) /* SI IV Ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) * Each IV ring entry is 128 bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) * [7:0] - interrupt source id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) * [31:8] - reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) * [59:32] - interrupt source data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) * [63:60] - reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) * [71:64] - RINGID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) * [79:72] - VMID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) * [127:80] - reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) int si_irq_process(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) u32 *disp_int = rdev->irq.stat_regs.evergreen.disp_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) u32 crtc_idx, hpd_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) u32 wptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) u32 rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) u32 src_id, src_data, ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) u32 ring_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) bool queue_hotplug = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) bool queue_dp = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) bool queue_thermal = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) u32 status, addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) const char *event_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) if (!rdev->ih.enabled || rdev->shutdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) wptr = si_get_ih_wptr(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) restart_ih:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) /* is somebody else already processing irqs? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) if (atomic_xchg(&rdev->ih.lock, 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) rptr = rdev->ih.rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) /* Order reading of wptr vs. reading of IH ring data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) /* display interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) si_irq_ack(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) while (rptr != wptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) /* wptr/rptr are in bytes! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) ring_index = rptr / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) switch (src_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) case 1: /* D1 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) case 2: /* D2 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) case 3: /* D3 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) case 4: /* D4 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) case 5: /* D5 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) case 6: /* D6 vblank/vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) crtc_idx = src_id - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) if (src_data == 0) { /* vblank */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) mask = LB_D1_VBLANK_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) event_name = "vblank";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) if (rdev->irq.crtc_vblank_int[crtc_idx]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) drm_handle_vblank(rdev->ddev, crtc_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) rdev->pm.vblank_sync = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) wake_up(&rdev->irq.vblank_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) if (atomic_read(&rdev->irq.pflip[crtc_idx])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) radeon_crtc_handle_vblank(rdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) crtc_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) } else if (src_data == 1) { /* vline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) mask = LB_D1_VLINE_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) event_name = "vline";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) DRM_DEBUG("Unhandled interrupt: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) if (!(disp_int[crtc_idx] & mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) DRM_DEBUG("IH: D%d %s - IH event w/o asserted irq bit?\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) crtc_idx + 1, event_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) disp_int[crtc_idx] &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) DRM_DEBUG("IH: D%d %s\n", crtc_idx + 1, event_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) case 8: /* D1 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) case 10: /* D2 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) case 12: /* D3 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) case 14: /* D4 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) case 16: /* D5 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) case 18: /* D6 page flip */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) if (radeon_use_pflipirq > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) case 42: /* HPD hotplug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) if (src_data <= 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) hpd_idx = src_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) mask = DC_HPD1_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) queue_hotplug = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) event_name = "HPD";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) } else if (src_data <= 11) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) hpd_idx = src_data - 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) mask = DC_HPD1_RX_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) queue_dp = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) event_name = "HPD_RX";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) DRM_DEBUG("Unhandled interrupt: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) if (!(disp_int[hpd_idx] & mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) disp_int[hpd_idx] &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) DRM_DEBUG("IH: %s%d\n", event_name, hpd_idx + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) case 96:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) WREG32(SRBM_INT_ACK, 0x1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) case 124: /* UVD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) case 146:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) case 147:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) /* reset addr and status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) if (addr == 0x0 && status == 0x0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) si_vm_decode_fault(rdev, status, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) case 176: /* RINGID0 CP_INT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) case 177: /* RINGID1 CP_INT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) case 178: /* RINGID2 CP_INT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) case 181: /* CP EOP event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) DRM_DEBUG("IH: CP EOP\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) switch (ring_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) case 224: /* DMA trap event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) DRM_DEBUG("IH: DMA trap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) case 230: /* thermal low to high */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) DRM_DEBUG("IH: thermal low to high\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) rdev->pm.dpm.thermal.high_to_low = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) queue_thermal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) case 231: /* thermal high to low */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) DRM_DEBUG("IH: thermal high to low\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) rdev->pm.dpm.thermal.high_to_low = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) queue_thermal = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) case 233: /* GUI IDLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) DRM_DEBUG("IH: GUI idle\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) case 244: /* DMA trap event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) DRM_DEBUG("IH: DMA1 trap\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) /* wptr/rptr are in bytes! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) rptr += 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) rptr &= rdev->ih.ptr_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) WREG32(IH_RB_RPTR, rptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) if (queue_dp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) schedule_work(&rdev->dp_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) if (queue_hotplug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) schedule_delayed_work(&rdev->hotplug_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) if (queue_thermal && rdev->pm.dpm_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) schedule_work(&rdev->pm.dpm.thermal.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) rdev->ih.rptr = rptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) atomic_set(&rdev->ih.lock, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) /* make sure wptr hasn't changed while processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) wptr = si_get_ih_wptr(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) if (wptr != rptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) goto restart_ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) * startup/shutdown callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) static void si_uvd_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) r = radeon_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) * At this point rdev->uvd.vcpu_bo is NULL which trickles down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471) * to early fails uvd_v2_2_resume() and thus nothing happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) * there. So it is pointless to try to go through that code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) * hence why we disable uvd here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) rdev->has_uvd = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) static void si_uvd_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) if (!rdev->has_uvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) r = uvd_v2_2_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) static void si_uvd_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) r = uvd_v1_0_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) static void si_vce_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) if (!rdev->has_vce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) r = radeon_vce_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) * At this point rdev->vce.vcpu_bo is NULL which trickles down
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) * to early fails si_vce_start() and thus nothing happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) * there. So it is pointless to try to go through that code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) * hence why we disable vce here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) rdev->has_vce = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) static void si_vce_start(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) if (!rdev->has_vce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) r = radeon_vce_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) r = vce_v1_0_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) static void si_vce_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) r = vce_v1_0_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) static int si_startup(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) struct radeon_ring *ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617) /* enable pcie gen2/3 link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618) si_pcie_gen3_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) /* enable aspm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) si_program_aspm(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) /* scratch needs to be initialized before MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) r = r600_vram_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) si_mc_program(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) if (!rdev->pm.dpm_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) r = si_mc_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) DRM_ERROR("Failed to load MC firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) r = si_pcie_gart_enable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) si_gpu_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) /* allocate rlc buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) if (rdev->family == CHIP_VERDE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) rdev->rlc.reg_list_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) rdev->rlc.cs_data = si_cs_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) r = sumo_rlc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) DRM_ERROR("Failed to init rlc BOs!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) /* allocate wb buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) r = radeon_wb_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) si_uvd_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) si_vce_start(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) /* Enable IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) if (!rdev->irq.installed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) r = radeon_irq_kms_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) r = si_irq_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) DRM_ERROR("radeon: IH init failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) si_irq_set(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) RADEON_CP_PACKET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) RADEON_CP_PACKET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) RADEON_CP_PACKET2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) r = si_cp_load_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) r = si_cp_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) r = cayman_dma_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) si_uvd_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) si_vce_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) r = radeon_ib_pool_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) r = radeon_vm_manager_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) r = radeon_audio_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) int si_resume(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) * posting will perform necessary task to bring back GPU into good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) * shape.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) /* post card */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) /* init golden registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) si_init_golden_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) if (rdev->pm.pm_method == PM_METHOD_DPM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) radeon_pm_resume(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) r = si_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) DRM_ERROR("si startup failed on resume\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) int si_suspend(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) radeon_pm_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) radeon_audio_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) radeon_vm_manager_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) si_cp_enable(rdev, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) cayman_dma_stop(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) radeon_uvd_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) if (rdev->has_vce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) radeon_vce_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) si_fini_pg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) si_fini_cg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) si_irq_suspend(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) radeon_wb_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) si_pcie_gart_disable(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) /* Plan is to move initialization in that function and use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) * helper function so that radeon_device_init pretty much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) * do nothing more than calling asic specific function. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) * should also allow to remove a bunch of callback function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) * like vram_info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) int si_init(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) /* Read BIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) if (!radeon_get_bios(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) if (ASIC_IS_AVIVO(rdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) /* Must be an ATOMBIOS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) if (!rdev->is_atom_bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) r = radeon_atombios_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) /* Post card if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) if (!radeon_card_posted(rdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) if (!rdev->bios) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) DRM_INFO("GPU not posted. posting now...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) atom_asic_init(rdev->mode_info.atom_context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) /* init golden registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) si_init_golden_registers(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) /* Initialize scratch registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) si_scratch_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) /* Initialize surface registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) radeon_surface_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) /* Initialize clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) radeon_get_clock_info(rdev->ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) /* Fence driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) r = radeon_fence_driver_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) /* initialize memory controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) r = si_mc_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) /* Memory manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) r = radeon_bo_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) !rdev->rlc_fw || !rdev->mc_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) r = si_init_microcode(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) DRM_ERROR("Failed to load firmware!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) /* Initialize power management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) radeon_pm_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) r600_ring_init(rdev, ring, 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) r600_ring_init(rdev, ring, 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) r600_ring_init(rdev, ring, 1024 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) r600_ring_init(rdev, ring, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) ring->ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) r600_ring_init(rdev, ring, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) si_uvd_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) si_vce_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) rdev->ih.ring_obj = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) r600_ih_ring_init(rdev, 64 * 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) r = r600_pcie_gart_init(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) rdev->accel_working = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) r = si_startup(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) dev_err(rdev->dev, "disabling GPU acceleration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) si_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) cayman_dma_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) si_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) radeon_vm_manager_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) si_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) rdev->accel_working = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) /* Don't start up if the MC ucode is missing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) * The default clocks and voltages before the MC ucode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) * is loaded are not suffient for advanced operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) if (!rdev->mc_fw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) DRM_ERROR("radeon: MC ucode required for NI+.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) void si_fini(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) radeon_pm_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) si_cp_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) cayman_dma_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) si_fini_pg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) si_fini_cg(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) si_irq_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) sumo_rlc_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) radeon_wb_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) radeon_vm_manager_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) radeon_ib_pool_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) radeon_irq_kms_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) if (rdev->has_uvd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) uvd_v1_0_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) radeon_uvd_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) if (rdev->has_vce)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) radeon_vce_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) si_pcie_gart_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) r600_vram_scratch_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) radeon_gem_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) radeon_fence_driver_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) radeon_bo_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) radeon_atombios_fini(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) kfree(rdev->bios);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) rdev->bios = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) * si_get_gpu_clock_counter - return GPU clock counter snapshot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) * @rdev: radeon_device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) * Fetches a GPU clock counter snapshot (SI).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) * Returns the 64 bit clock counter snapshot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) uint64_t clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) mutex_lock(&rdev->gpu_clock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) mutex_unlock(&rdev->gpu_clock_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) return clock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) /* bypass vclk and dclk with bclk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) WREG32_P(CG_UPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) /* put PLL in bypass mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) if (!vclk || !dclk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) /* keep the Bypass mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) 16384, 0x03FFFFFF, 0, 128, 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) &fb_div, &vclk_div, &dclk_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) /* set RESET_ANTI_MUX to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) /* set VCO_MODE to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) /* disable sleep mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) /* deassert UPLL_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) /* assert UPLL_RESET again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) /* disable spread spectrum. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) /* set feedback divider */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) /* set ref divider to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) if (fb_div < 307200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) /* set PDIV_A and PDIV_B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) WREG32_P(CG_UPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) /* give the PLL some time to settle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) /* deassert PLL_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) /* switch from bypass mode to normal mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) /* switch VCLK and DCLK selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) WREG32_P(CG_UPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) mdelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) static void si_pcie_gen3_enable(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) struct pci_dev *root = rdev->pdev->bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) enum pci_bus_speed speed_cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) u32 speed_cntl, current_data_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) u16 tmp16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) if (pci_is_root_bus(rdev->pdev->bus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) if (radeon_pcie_gen2 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) if (rdev->flags & RADEON_IS_IGP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) if (!(rdev->flags & RADEON_IS_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) speed_cap = pcie_get_speed_cap(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) if (speed_cap == PCI_SPEED_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) if ((speed_cap != PCIE_SPEED_8_0GT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) (speed_cap != PCIE_SPEED_5_0GT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) LC_CURRENT_DATA_RATE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) if (speed_cap == PCIE_SPEED_8_0GT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) if (current_data_rate == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) DRM_INFO("PCIE gen 3 link speeds already enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) } else if (speed_cap == PCIE_SPEED_5_0GT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) if (current_data_rate == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) DRM_INFO("PCIE gen 2 link speeds already enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) if (speed_cap == PCIE_SPEED_8_0GT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) /* re-try equalization if gen3 is not already enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) if (current_data_rate != 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) u16 bridge_cfg, gpu_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) u16 bridge_cfg2, gpu_cfg2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) u32 max_lw, current_lw, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141) pcie_capability_read_word(root, PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) &bridge_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) &gpu_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) tmp = RREG32_PCIE(PCIE_LC_STATUS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) if (current_lw < max_lw) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) if (tmp & LC_RENEGOTIATION_SUPPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) /* check status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) pcie_capability_read_word(rdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) PCI_EXP_DEVSTA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) if (tmp16 & PCI_EXP_DEVSTA_TRPND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) pcie_capability_read_word(root, PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) &bridge_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) pcie_capability_read_word(rdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) &gpu_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) &bridge_cfg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) pcie_capability_read_word(rdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184) PCI_EXP_LNKCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) &gpu_cfg2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) tmp |= LC_SET_QUIESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) tmp |= LC_REDO_EQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) /* linkctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) pcie_capability_read_word(root, PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) pcie_capability_write_word(root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) pcie_capability_read_word(rdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) pcie_capability_write_word(rdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) PCI_EXP_LNKCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) /* linkctl2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) PCI_EXP_LNKCTL2_TX_MARGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) tmp16 |= (bridge_cfg2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) (PCI_EXP_LNKCTL2_ENTER_COMP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) PCI_EXP_LNKCTL2_TX_MARGIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) pcie_capability_write_word(root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) PCI_EXP_LNKCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) pcie_capability_read_word(rdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) PCI_EXP_LNKCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) PCI_EXP_LNKCTL2_TX_MARGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) tmp16 |= (gpu_cfg2 &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) (PCI_EXP_LNKCTL2_ENTER_COMP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) PCI_EXP_LNKCTL2_TX_MARGIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) pcie_capability_write_word(rdev->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) PCI_EXP_LNKCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) tmp &= ~LC_SET_QUIESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) /* set the link speed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) if (speed_cap == PCIE_SPEED_8_0GT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) else if (speed_cap == PCIE_SPEED_5_0GT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) for (i = 0; i < rdev->usec_timeout; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) static void si_program_aspm(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) u32 data, orig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) bool disable_clkreq = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) if (radeon_aspm == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) if (!(rdev->flags & RADEON_IS_PCIE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) data &= ~LC_XMIT_N_FTS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) data |= LC_GO_TO_RECOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) orig = data = RREG32_PCIE(PCIE_P_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297) data |= P_IGNORE_EDB_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) WREG32_PCIE(PCIE_P_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) data |= LC_PMI_TO_L1_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) if (!disable_l0s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) data |= LC_L0S_INACTIVITY(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) if (!disable_l1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) data |= LC_L1_INACTIVITY(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) data &= ~LC_PMI_TO_L1_DIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) if (!disable_plloff_in_l1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) bool clk_req_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) data &= ~PLL_RAMP_UP_TIME_0_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) data &= ~PLL_RAMP_UP_TIME_1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) data &= ~PLL_RAMP_UP_TIME_2_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) data &= ~PLL_RAMP_UP_TIME_3_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) data &= ~PLL_RAMP_UP_TIME_0_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) data &= ~PLL_RAMP_UP_TIME_1_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) data &= ~PLL_RAMP_UP_TIME_2_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) data &= ~PLL_RAMP_UP_TIME_3_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) data &= ~LC_DYN_LANES_PWR_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) data |= LC_DYN_LANES_PWR_STATE(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) data &= ~LS2_EXIT_TIME_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389) if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) data |= LS2_EXIT_TIME(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) data &= ~LS2_EXIT_TIME_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) data |= LS2_EXIT_TIME(5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) if (!disable_clkreq &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) !pci_is_root_bus(rdev->pdev->bus)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) struct pci_dev *root = rdev->pdev->bus->self;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) u32 lnkcap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) clk_req_support = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) clk_req_support = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) clk_req_support = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) if (clk_req_support) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) orig = data = RREG32(THM_CLK_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) WREG32(THM_CLK_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) orig = data = RREG32(MISC_CLK_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) WREG32(MISC_CLK_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) orig = data = RREG32(CG_CLKPIN_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) data &= ~BCLK_AS_XCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) WREG32(CG_CLKPIN_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) orig = data = RREG32(CG_CLKPIN_CNTL_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) data &= ~FORCE_BIF_REFCLK_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) WREG32(CG_CLKPIN_CNTL_2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) orig = data = RREG32(MPLL_BYPASSCLK_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) data &= ~MPLL_CLKOUT_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) data |= MPLL_CLKOUT_SEL(4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) WREG32(MPLL_BYPASSCLK_SEL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) orig = data = RREG32(SPLL_CNTL_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) data &= ~SPLL_REFCLK_SEL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) WREG32(SPLL_CNTL_MODE, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) orig = data = RREG32_PCIE(PCIE_CNTL2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) WREG32_PCIE(PCIE_CNTL2, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) if (!disable_l0s) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) data = RREG32_PCIE(PCIE_LC_STATUS1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469) orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) data &= ~LC_L0S_INACTIVITY_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) if (orig != data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) /* make sure VCEPLL_CTLREQ is deasserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487) /* assert UPLL_CTLREQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) /* wait for CTLACK and CTLACK2 to get asserted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) for (i = 0; i < 100; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) mdelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) /* deassert UPLL_CTLREQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) if (i == 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) DRM_ERROR("Timeout setting UVD clocks!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) int r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) /* bypass evclk and ecclk with bclk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) /* put PLL in bypass mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) ~VCEPLL_BYPASS_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) if (!evclk || !ecclk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) /* keep the Bypass mode, put PLL to sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) ~VCEPLL_SLEEP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) 16384, 0x03FFFFFF, 0, 128, 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) &fb_div, &evclk_div, &ecclk_div);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) /* set RESET_ANTI_MUX to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) /* set VCO_MODE to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) ~VCEPLL_VCO_MODE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) /* toggle VCEPLL_SLEEP to 1 then back to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) ~VCEPLL_SLEEP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) /* deassert VCEPLL_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) mdelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553) r = si_vce_send_vcepll_ctlreq(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) /* assert VCEPLL_RESET again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) /* disable spread spectrum. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) /* set feedback divider */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) /* set ref divider to 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) /* set PDIV_A and PDIV_B */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) /* give the PLL some time to settle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) /* deassert PLL_RESET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) mdelay(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582) /* switch from bypass mode to normal mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) r = si_vce_send_vcepll_ctlreq(rdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) /* switch VCLK and DCLK selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) mdelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) }